Example #1
    def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring
                       initializer=None, regularizer=None, reuse=None,
                       trainable=True, collections=None, caching_device=None,  # pylint: disable=redefined-outer-name
                       partitioner=None, validate_shape=True,
                       use_resource=None):
      del getter, regularizer, collections, caching_device, partitioner
      del use_resource, validate_shape
      if name in self.tf_variables:
        if reuse:
          return self.tf_variables[name].initialized_value()
        else:
          raise ValueError("Specified reuse=%s but tried to reuse variables."
                           % reuse)
      # TODO(apassos): ensure this is on the same device as above
      v = _CapturedVariable(name, initializer, shape, dtype, trainable)
      self.variables[name] = v

      graph_mode_resource = resource_variable_ops.var_handle_op(
          shared_name=name, shape=shape, dtype=dtype)
      if initializer is None:
        initializer = _default_initializer(name, shape, dtype)
      resource_variable_ops.assign_variable_op(
          graph_mode_resource, initializer(shape, dtype))
      return _VariableFromResource(
          graph_mode_resource, dtype, name, shape=v.shape)
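
A hedged illustration of where a getter like this plugs in: tf.compat.v1.variable_scope accepts a custom_getter that intercepts get_variable calls. The pass-through logging_getter below is illustrative only and far simpler than the capturing getter above; it assumes TensorFlow 2.x with the v1 compatibility API.

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()  # v1 graph-mode semantics for get_variable

def logging_getter(getter, name, *args, **kwargs):
  # Observe the interception, then delegate to the default getter.
  print("creating variable:", name)
  return getter(name, *args, **kwargs)

with tf1.variable_scope("demo", custom_getter=logging_getter):
  w = tf1.get_variable("w", shape=[2], dtype=tf1.float32)
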
  def testAssignAdd(self):
    with self.test_session():
      handle = resource_variable_ops.var_handle_op(
          dtype=dtypes.int32, shape=[])
      resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(1, dtype=dtypes.int32)).run()
      resource_variable_ops.assign_add_variable_op(
          handle, constant_op.constant(1, dtype=dtypes.int32)).run()
      read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
      self.assertEqual(read.eval(), 2)
 def testDtypeSurvivesIdentity(self):
   with self.test_session():
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
     id_handle = array_ops.identity(handle)
     resource_variable_ops.assign_variable_op(id_handle,
                                              constant_op.constant(
                                                  0,
                                                  dtype=dtypes.int32)).run()
 def testCreateRead(self):
   with self.test_session():
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
     resource_variable_ops.assign_variable_op(
         handle, constant_op.constant(1, dtype=dtypes.int32)).run()
     value = resource_variable_ops.read_variable_op(
         handle, dtype=dtypes.int32).eval()
     self.assertAllEqual(1, value)
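
To try the create/assign/read lifecycle outside the test harness, here is a minimal sketch using the public tf.raw_ops endpoints, which mirror resource_variable_ops in TF 2.x eager mode (assumes TensorFlow 2 is installed):

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.int32, shape=[])
tf.raw_ops.AssignVariableOp(resource=handle,
                            value=tf.constant(1, dtype=tf.int32))
value = tf.raw_ops.ReadVariableOp(resource=handle, dtype=tf.int32)
print(int(value))  # 1
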
Example #5
 def assign_fn():
   with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
     resource_variable_ops.assign_variable_op(
         self._handle,
         initial_value,
         name=n)
     # Returning values to keep tf.cond happy.
   return ops.convert_to_tensor(1)
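
assign_fn above is one branch of a tf.cond that runs the initializer only when the variable is still uninitialized; the full pattern appears in the constructors later in this listing. A hedged standalone sketch of that pairing using public raw ops in TF 2.x (the shared_name and constant values are illustrative):

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[],
                                shared_name="maybe_init_demo")

def assign_fn():
  tf.raw_ops.AssignVariableOp(resource=handle, value=tf.constant(3.0))
  return tf.constant(1)  # returning a value keeps tf.cond happy

def not_assign_fn():
  return tf.constant(0)

# Initialize only if the resource has not been written yet.
tf.cond(tf.raw_ops.VarIsInitializedOp(resource=handle),
        not_assign_fn, assign_fn)
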
  def testScatterAdd(self):
    with self.test_session():
      handle = resource_variable_ops.var_handle_op(
          dtype=dtypes.int32, shape=[1, 1])
      resource_variable_ops.assign_variable_op(
          handle, constant_op.constant([[1]], dtype=dtypes.int32)).run()
      resource_variable_ops.resource_scatter_add(
          handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)).run()
      read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
      self.assertEqual(read.eval(), [[3]])
 def testReadVariableDtypeMismatchEager(self):
   with context.eager_mode():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1], name="foo")
     resource_variable_ops.assign_variable_op(handle, 1)
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "Trying to read variable with wrong dtype. "
                                  "Expected float got int32."):
       _ = resource_variable_ops.read_variable_op(handle, dtype=dtypes.float32)
 def testAssignVariableDtypeMismatchEager(self):
   with context.eager_mode():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1], name="foo")
     resource_variable_ops.assign_variable_op(
         handle, constant_op.constant([1]))
     with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                  "Trying to assign variable with wrong "
                                  "dtype. Expected int32 got float."):
       resource_variable_ops.assign_variable_op(
           handle, constant_op.constant([1.], dtype=dtypes.float32))
  def testManyAssigns(self):
    with self.test_session() as session:
      handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
      create = resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(1, dtype=dtypes.int32))
      with ops.control_dependencies([create]):
        first_read = resource_variable_ops.read_variable_op(
            handle, dtype=dtypes.int32)
      with ops.control_dependencies([first_read]):
        write = resource_variable_ops.assign_variable_op(
            handle, constant_op.constant(2, dtype=dtypes.int32))
      with ops.control_dependencies([write]):
        second_read = resource_variable_ops.read_variable_op(
            handle, dtype=dtypes.int32)
      f, s = session.run([first_read, second_read])
      self.assertEqual(f, 1)
      self.assertEqual(s, 2)
 def testCreateRead(self):
   handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
   self.evaluate(resource_variable_ops.assign_variable_op(
       handle, constant_op.constant(1, dtype=dtypes.int32)))
   value = self.evaluate(
       resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
   self.assertAllEqual(1, value)
 def testScatterAdd(self):
   handle = resource_variable_ops.var_handle_op(
       dtype=dtypes.int32, shape=[1, 1])
   self.evaluate(resource_variable_ops.assign_variable_op(
       handle, constant_op.constant([[1]], dtype=dtypes.int32)))
   self.evaluate(resource_variable_ops.resource_scatter_add(
       handle, [0], constant_op.constant([[2]], dtype=dtypes.int32)))
   read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
   self.assertEqual(self.evaluate(read), [[3]])
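
The scatter tests all exercise the same semantics: params[indices[i]] op= updates[i], applied row by row. A hedged eager-mode sketch of the add case via the public raw op (assumes TF 2.x):

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.int32, shape=[1, 1])
tf.raw_ops.AssignVariableOp(resource=handle,
                            value=tf.constant([[1]], dtype=tf.int32))
# Adds updates[0] into row 0 of the variable: [[1]] + [[2]] -> [[3]].
tf.raw_ops.ResourceScatterAdd(resource=handle,
                              indices=tf.constant([0]),
                              updates=tf.constant([[2]], dtype=tf.int32))
print(tf.raw_ops.ReadVariableOp(resource=handle, dtype=tf.int32).numpy())
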
 def testAssignAdd(self):
   handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
   self.evaluate(resource_variable_ops.assign_variable_op(
       handle, constant_op.constant(1, dtype=dtypes.int32)))
   self.evaluate(resource_variable_ops.assign_add_variable_op(
       handle, constant_op.constant(1, dtype=dtypes.int32)))
   read = self.evaluate(
       resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
   self.assertEqual(read, 2)
 def testScatterUpdateString(self):
   handle = resource_variable_ops.var_handle_op(
       dtype=dtypes.string, shape=[1, 1])
   self.evaluate(resource_variable_ops.assign_variable_op(
       handle, constant_op.constant([["a"]], dtype=dtypes.string)))
   self.evaluate(resource_variable_ops.resource_scatter_update(
       handle, [0], constant_op.constant([["b"]], dtype=dtypes.string)))
   read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.string)
   self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
                    compat.as_bytes("b"))
 def testScatterDiv(self):
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32,
                                                  shape=[1, 1])
     self.evaluate(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[6]], dtype=dtypes.int32)))
     self.evaluate(
         resource_variable_ops.resource_scatter_div(
             handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle,
                                                   dtype=dtypes.int32)
     self.assertEqual(self.evaluate(read), [[2]])
 def testAssignAdd(self):
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32,
                                                  shape=[])
     self.evaluate(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant(1, dtype=dtypes.int32)))
     self.evaluate(
         resource_variable_ops.assign_add_variable_op(
             handle, constant_op.constant(1, dtype=dtypes.int32)))
     read = self.evaluate(
         resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32))
     self.assertEqual(read, 2)
 def testScatterDiv(self):
   with self.test_session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[6]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_div(
             handle, [0], constant_op.constant([[3]], dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertAllEqual(sess.run(read), [[2]])
 def testScatterSub(self):
   with self.test_session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[2, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[4], [1]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_sub(
             handle, [1], constant_op.constant([[2]], dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertAllEqual(self.evaluate(read), [[4], [-1]])
 def testScatterMaxScalar(self):
   with self.session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[6]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_max(
             handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertEqual(self.evaluate(read), [[6]])
 def testScatterSub(self):
   with self.session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[2, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[4], [1]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_sub(
             handle, [1], constant_op.constant([[2]], dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertAllEqual(self.evaluate(read), [[4], [-1]])
Example #20
 def testScatterMulScalar(self):
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32,
                                                  shape=[1, 1])
     self.evaluate(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[1]], dtype=dtypes.int32)))
     self.evaluate(
         resource_variable_ops.resource_scatter_mul(
             handle, [0], constant_op.constant(5, dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle,
                                                   dtype=dtypes.int32)
     self.assertEqual(self.evaluate(read), [[5]])
 def testScatterSubScalar(self):
   with self.test_session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[1]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_sub(
             handle, [0], constant_op.constant(2, dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertEqual(sess.run(read), [[-1]])
 def testScatterMaxScalar(self):
   with self.test_session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1, 1])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([[6]], dtype=dtypes.int32)))
     sess.run(
         resource_variable_ops.resource_scatter_max(
             handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertEqual(self.evaluate(read), [[6]])
 def testScatterUpdateStringScalar(self):
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.string,
                                                  shape=[1, 1])
     self.evaluate(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([["a"]], dtype=dtypes.string)))
     self.evaluate(
         resource_variable_ops.resource_scatter_update(
             handle, [0], constant_op.constant("b", dtype=dtypes.string)))
     read = resource_variable_ops.read_variable_op(handle,
                                                   dtype=dtypes.string)
     self.assertEqual(compat.as_bytes(self.evaluate(read)[0][0]),
                      compat.as_bytes("b"))
 def testScatterMaxScalar(self):
     with ops.device("cpu:0"):
         handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32,
                                                      shape=[1, 1])
         self.evaluate(
             resource_variable_ops.assign_variable_op(
                 handle, constant_op.constant([[6]], dtype=dtypes.int32)))
         self.evaluate(
             resource_variable_ops.resource_scatter_max(
                 handle, [0], constant_op.constant(3, dtype=dtypes.int32)))
         read = resource_variable_ops.read_variable_op(handle,
                                                       dtype=dtypes.int32)
         self.assertEqual(self.evaluate(read), [[6]])
  def testHandleDtypeShapeMatch(self):
    with self.test_session():
      handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
      with self.assertRaises(ValueError):
        resource_variable_ops.assign_variable_op(
            handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
      with self.assertRaises(ValueError):
        resource_variable_ops.assign_variable_op(
            handle, constant_op.constant([0], dtype=dtypes.int32)).run()
      resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(0, dtype=dtypes.int32)).run()
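
testHandleDtypeShapeMatch shows graph construction rejecting a mismatched dtype or shape with ValueError. In eager execution the same mismatch surfaces at op run time as InvalidArgumentError, consistent with the eager dtype-mismatch tests earlier in this listing; a hedged sketch:

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.int32, shape=[])
try:
  tf.raw_ops.AssignVariableOp(resource=handle, value=tf.constant(0.0))
except tf.errors.InvalidArgumentError as e:
  print("rejected:", e.message)
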
Example #26
 def testScatterNdAddOps(self):
   with self.session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.float32, shape=[8])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([1] * 8, dtype=dtypes.float32)))
     indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
     updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
     expected = np.array([1, 12, 1, 11, 10, 1, 1, 13])
     sess.run(gen_state_ops.resource_scatter_nd_add(handle, indices, updates))
     read = resource_variable_ops.read_variable_op(
         handle, dtype=dtypes.float32)
     self.assertAllClose(expected, self.evaluate(read))
 def testScatterNdAddOps(self):
   with self.test_session() as sess, self.test_scope():
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.float32, shape=[8])
     sess.run(
         resource_variable_ops.assign_variable_op(
             handle, constant_op.constant([1] * 8, dtype=dtypes.float32)))
     indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
     updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
     expected = np.array([1, 12, 1, 11, 10, 1, 1, 13])
     sess.run(gen_state_ops.resource_scatter_nd_add(handle, indices, updates))
     read = resource_variable_ops.read_variable_op(
         handle, dtype=dtypes.float32)
     self.assertAllClose(expected, self.evaluate(read))
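
A quick numpy cross-check of the `expected` array used in both tests above; resource_scatter_nd_add computes ref[indices[i]] += updates[i] over the index list:

import numpy as np

ref = np.ones(8, dtype=np.float32)
for idx, upd in zip([4, 3, 1, 7], [9, 10, 11, 12]):
  ref[idx] += upd
print(ref)  # [ 1. 12.  1. 11. 10.  1.  1. 13.]
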
Example #28
        def _custom_getter(
                getter=None,
                name=None,
                shape=None,
                dtype=dtypes.float32,  # pylint: disable=missing-docstring
                initializer=None,
                regularizer=None,
                reuse=None,
                trainable=True,
                collections=None,
                caching_device=None,  # pylint: disable=redefined-outer-name
                partitioner=None,
                validate_shape=True,
                use_resource=None):
            del getter, regularizer, collections, caching_device, partitioner
            del use_resource, validate_shape
            if name in self.tf_variables:
                if reuse:
                    return self.tf_variables[name].initialized_value()
                else:
                    raise ValueError(
                        "Specified reuse=%s but tried to reuse variables." %
                        reuse)
            # TODO(apassos): ensure this is on the same device as above
            v = _CapturedVariable(name, initializer, shape, dtype, trainable)
            self.variables[name] = v

            graph_mode_resource = resource_variable_ops.var_handle_op(
                shared_name=name, shape=shape, dtype=dtype)
            if initializer is None:
                initializer = _default_initializer(name, shape, dtype)
            resource_variable_ops.assign_variable_op(graph_mode_resource,
                                                     initializer(shape, dtype))
            return _VariableFromResource(graph_mode_resource,
                                         dtype,
                                         name,
                                         shape=v.shape)
 def testScatterMin(self):
   with ops.device("cpu:0"):
     handle = resource_variable_ops.var_handle_op(
         dtype=dtypes.int32, shape=[1, 1])
     self.evaluate(
         resource_variable_ops.assign_variable_op(handle,
                                                  constant_op.constant(
                                                      [[6]],
                                                      dtype=dtypes.int32)))
     self.evaluate(
         resource_variable_ops.resource_scatter_min(handle, [0],
                                                    constant_op.constant(
                                                        [[3]],
                                                        dtype=dtypes.int32)))
     read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
     self.assertEqual(self.evaluate(read), [[3]])
Example #30
 def testHandleDtypeShapeMatch(self):
   with self.test_session():
     handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
     with self.assertRaises(ValueError):
       resource_variable_ops.assign_variable_op(
           handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
     with self.assertRaises(ValueError):
       resource_variable_ops.assign_variable_op(
           handle, constant_op.constant([0], dtype=dtypes.int32)).run()
     resource_variable_ops.assign_variable_op(
         handle, constant_op.constant(0, dtype=dtypes.int32)).run()
  def testManyAssigns(self):
    with self.test_session() as session:
      handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
      create = resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(1, dtype=dtypes.int32))
      with ops.control_dependencies([create]):
        first_read = resource_variable_ops.read_variable_op(
            handle, dtype=dtypes.int32)
      with ops.control_dependencies([first_read]):
        write = resource_variable_ops.assign_variable_op(
            handle, constant_op.constant(2, dtype=dtypes.int32))
      with ops.control_dependencies([write]):
        second_read = resource_variable_ops.read_variable_op(
            handle, dtype=dtypes.int32)
      f, s = session.run([first_read, second_read])
      self.assertEqual(f, 1)
      self.assertEqual(s, 2)
 def testDtypeSurvivesIdentity(self):
   handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
   id_handle = array_ops.identity(handle)
   self.evaluate(resource_variable_ops.assign_variable_op(
       id_handle, constant_op.constant(0, dtype=dtypes.int32)))
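
The identity tests rely on the resource handle carrying its dtype through tf.identity, so a typed read on the pass-through handle still validates. A hedged eager-mode sketch with public raw ops:

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.int32, shape=[])
id_handle = tf.identity(handle)  # dtype metadata survives the identity
tf.raw_ops.AssignVariableOp(resource=id_handle,
                            value=tf.constant(0, dtype=tf.int32))
print(tf.raw_ops.ReadVariableOp(resource=id_handle, dtype=tf.int32).numpy())
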
Example #33
  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=None,
               caching_device=None,
               name=None,
               dtype=None,
               constraint=None,
               add_initializers_to=None,
               lifted_initializer_graph=None,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: if not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if not ops.inside_function():
      # If we've been init_scope()d out of the function definition nothing to do
      # here; we can't really do the capturing or conditional logic.
      resource_variable_ops.ResourceVariable.__init__(
          self, initial_value=initial_value, trainable=trainable,
          caching_device=caching_device, name=name, dtype=dtype,
          constraint=constraint)
      return
    with ops.init_scope():
      self._in_graph_mode = not context.executing_eagerly()
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    if trainable is None:
      trainable = True
    self._trainable = trainable
    self._save_slice_info = None
    self._initial_value = None
    self._initializer_op = None
    self._is_initialized_op = None
    self._graph_element = None
    self._cached_value = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph. Guaranteed to be the same as the eager graph_key.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      # pylint: disable=protected-access
      with ops.init_scope():
        handle_name = ops._name_from_scope_name(name)
        unique_id = "%s_%d" % (handle_name, ops.uid())
        shared_name = context.shared_name(unique_id)
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      with ops.init_scope():
        self._handle = resource_variable_ops.eager_safe_variable_handle(
            initial_value=initial_value,
            shared_name=shared_name,
            name=name,
            graph_mode=self._in_graph_mode)
      self._shape = initial_value.shape
      self._unique_id = unique_id
      self._handle_name = handle_name + ":0"
      self._dtype = initial_value.dtype.base_dtype
      self._constraint = constraint
      assert initial_value is not None
      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
          ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
      else:
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value
        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle,
                initial_value,
                name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)
        def not_assign_fn():
          return ops.convert_to_tensor(0)
        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)

    # After the handle has been created, set up a way to clean it up when
    # executing eagerly. We'll hold the only reference to the deleter, so that
    # when this object is garbage collected the deleter will be too. This
    # means ResourceVariables can be part of reference cycles without those
    # cycles being uncollectable.
    if not self._in_graph_mode:
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._handle, handle_device=self._handle.device)
    self._cached_shape_as_list = None
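
This constructor underlies variable creation inside tf.function: the initializer is either lifted into the outer graph or guarded by the cond above so it runs at most once. A hedged sketch of the observable behavior through the public API (the class itself is internal; create-on-first-trace is the standard pattern):

import tensorflow as tf

v = None

@tf.function
def scale(x):
  global v
  if v is None:
    v = tf.Variable(2.0)  # created on first trace, initialized once
  return v * x

print(scale(tf.constant(3.0)).numpy())  # 6.0
print(scale(tf.constant(5.0)).numpy())  # 10.0
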
Example #34
  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=True,
               name=None,
               dtype=None,
               constraint=None,
               initialize=True,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, automatically watches this variable on GradientTape
        whenever it's used.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      initialize: if True, runs initialization in eager execution; leaves the
        variable uninitialized otherwise.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    """
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if isinstance(initial_value, ops.Tensor) and hasattr(
        initial_value, "graph") and initial_value.graph.building_function:
      raise ValueError("Tensor-typed variable initializers must either be "
                       "wrapped in an init_scope or callable "
                       "(e.g., `tf.Variable(lambda : "
                       "tf.truncated_normal([10, 40]))`) when building "
                       "functions. Please file a feature request if this "
                       "restriction inconveniences you.")

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    self._trainable = trainable
    self._save_slice_info = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.init_scope():
      self._in_graph_mode = not context.executing_eagerly()
      with ops.name_scope(name, "Variable", []
                          if init_from_fn else [initial_value]) as name:
        # pylint: disable=protected-access
        handle_name = ops._name_from_scope_name(name)
        shared_name = handle_name
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          if self._in_graph_mode:
            with ops.name_scope("Initializer"), ops.device(None):
              initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=self._in_graph_mode)
            self._shape = initial_value.get_shape()
          else:
            initial_value = initial_value()
            with ops.name_scope("Initializer"):
              initial_value = ops.convert_to_tensor(
                  initial_value, name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=False)
            self._shape = initial_value.get_shape()
        # pylint: enable=protected-access

        # Or get the initial value from a Tensor or Python object.
        else:
          with ops.name_scope("Initializer"):
            initial_value = ops.convert_to_tensor(
                initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if (self._in_graph_mode and initial_value is not None and
              initial_value.op._get_control_flow_context() is not None):
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          self._handle = _eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=shared_name,
              name=name,
              graph_mode=self._in_graph_mode)
          self._shape = initial_value.get_shape()

        self._unique_id = shared_name
        self._initial_value = initial_value if self._in_graph_mode else None
        self._handle_name = handle_name + ":0"
        self._dtype = initial_value.dtype.base_dtype
        self._constraint = constraint

        if self._in_graph_mode:
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = (
                  resource_variable_ops.assign_variable_op(
                      self._handle,
                      self._try_guard_against_uninitialized_dependencies(
                          initial_value),
                      name=n))
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
            self._cached_value = None
        else:
          if initialize:
            resource_variable_ops.assign_variable_op(self._handle,
                                                     initial_value)
          self._is_initialized_op = None
          self._initializer_op = None
          self._graph_element = None
          self._cached_value = None

    self._handle_deleter = None
    self._cached_shape_as_list = None
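
The `constraint` argument documented above stores a projection function on the variable; it is applied by optimizers such as those in Keras after updates, not by assign itself. A hedged illustration applying the stored constraint manually, with clip-by-norm as an arbitrary projection:

import tensorflow as tf

v = tf.Variable([30.0, 40.0], constraint=lambda t: tf.clip_by_norm(t, 1.0))
v.assign(v.constraint(v))  # project back onto the unit-norm ball
print(v.numpy())  # approximately [0.6, 0.8]
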
Example #36
  def __init__(self,
               initial_value=None,
               trainable=None,
               caching_device=None,
               name=None,
               dtype=None,
               constraint=None,
               add_initializers_to=None,
               lifted_initializer_graph=None,
               synchronization=None,
               aggregation=None,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: if not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if not ops.inside_function():
      # If we've been init_scope()d out of the function definition nothing to do
      # here; we can't really do the capturing or conditional logic.
      resource_variable_ops.ResourceVariable.__init__(
          self, initial_value=initial_value, trainable=trainable,
          caching_device=caching_device, name=name, dtype=dtype,
          constraint=constraint)
      return
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      assert initial_value is not None

      # Use the constructor for UninitializedVariable to start.
      super(UnliftedInitializerVariable, self).__init__(
          trainable=trainable,
          caching_device=caching_device,
          name=name,
          shape=initial_value.shape,
          dtype=initial_value.dtype,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          extra_handle_data=initial_value,
          **unused_kwargs)

      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
      else:
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value
        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle,
                initial_value,
                name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)
        def not_assign_fn():
          return ops.convert_to_tensor(0)
        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)
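
A hedged sketch of the run-once effect the lifted or cond-guarded initializer produces: state accumulates across calls because the initializer does not re-run on each invocation (public tf.Module and tf.function API; the counter is illustrative):

import tensorflow as tf

class Counter(tf.Module):

  def __init__(self):
    self.v = None

  @tf.function
  def __call__(self):
    if self.v is None:
      self.v = tf.Variable(0)  # initializer runs once, not per call
    self.v.assign_add(1)
    return self.v

c = Counter()
print(int(c()), int(c()))  # 1 2
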
Example #37
  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=True,
               name=None,
               dtype=None,
               constraint=None,
               initialize=True,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, automatically watches this variable on GradientTape
        whenever it's used.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      initialize: if True, runs initialization in eager execution; leaves the
        variable uninitialized otherwise.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
    """
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if isinstance(initial_value, ops.Tensor) and hasattr(
        initial_value, "graph") and initial_value.graph.building_function:
      raise ValueError("Tensor-typed variable initializers must either be "
                       "wrapped in an init_scope or callable "
                       "(e.g., `tf.Variable(lambda : "
                       "tf.truncated_normal([10, 40]))`) when building "
                       "functions. Please file a feature request if this "
                       "restriction inconveniences you.")

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    self._trainable = trainable
    self._save_slice_info = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.init_scope():
      self._in_graph_mode = not context.executing_eagerly()
      with ops.name_scope(name, "Variable", []
                          if init_from_fn else [initial_value]) as name:
        # pylint: disable=protected-access
        handle_name = ops._name_from_scope_name(name)
        shared_name = handle_name
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          if self._in_graph_mode:
            with ops.name_scope("Initializer"), ops.device(None):
              initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=self._in_graph_mode)
            self._shape = initial_value.get_shape()
          else:
            initial_value = initial_value()
            with ops.name_scope("Initializer"):
              initial_value = ops.convert_to_tensor(
                  initial_value, name="initial_value", dtype=dtype)
            self._handle = _eager_safe_variable_handle(
                shape=initial_value.get_shape(),
                dtype=initial_value.dtype.base_dtype,
                shared_name=shared_name,
                name=name,
                graph_mode=False)
            self._shape = initial_value.get_shape()
        # pylint: enable=protected-access

        # Or get the initial value from a Tensor or Python object.
        else:
          with ops.name_scope("Initializer"):
            initial_value = ops.convert_to_tensor(
                initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if (self._in_graph_mode and initial_value is not None and
              initial_value.op._get_control_flow_context() is not None):
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          self._handle = _eager_safe_variable_handle(
              shape=initial_value.get_shape(),
              dtype=initial_value.dtype.base_dtype,
              shared_name=shared_name,
              name=name,
              graph_mode=self._in_graph_mode)
          self._shape = initial_value.get_shape()

        self._unique_id = shared_name
        self._initial_value = initial_value if self._in_graph_mode else None
        self._handle_name = handle_name + ":0"
        self._dtype = initial_value.dtype.base_dtype
        self._constraint = constraint

        if self._in_graph_mode:
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = (
                  resource_variable_ops.assign_variable_op(
                      self._handle,
                      self._try_guard_against_uninitialized_dependencies(
                          initial_value),
                      name=n))
          with ops.name_scope("Read"), ops.colocate_with(self._handle):
            # Manually assign reads to the handle's device to avoid log
            # messages.
            with ops.device(self._handle.device):
              value = self._read_variable_op()
            self._graph_element = value
            self._cached_value = None
        else:
          if initialize:
            resource_variable_ops.assign_variable_op(self._handle,
                                                     initial_value)
          self._is_initialized_op = None
          self._initializer_op = None
          self._graph_element = None
          self._cached_value = None

    self._handle_deleter = None
    self._cached_shape_as_list = None
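
For the `initialize` flag documented above: an uninitialized resource is observable through var_is_initialized_op. A hedged raw-op sketch of that state transition in TF 2.x eager mode (the shared_name is illustrative):

import tensorflow as tf

handle = tf.raw_ops.VarHandleOp(dtype=tf.float32, shape=[],
                                shared_name="uninit_demo")
print(bool(tf.raw_ops.VarIsInitializedOp(resource=handle)))  # False
tf.raw_ops.AssignVariableOp(resource=handle, value=tf.constant(2.0))
print(bool(tf.raw_ops.VarIsInitializedOp(resource=handle)))  # True
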
Example #38
  def __init__(self,
               initial_value=None,
               trainable=None,
               caching_device=None,
               name=None,
               dtype=None,
               constraint=None,
               add_initializers_to=None,
               lifted_initializer_graph=None,
               synchronization=None,
               aggregation=None,
               shape=None,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: if not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      shape: (optional) The shape of this variable. If None, the shape of
        `initial_value` will be used. When setting this argument to
        `tf.TensorShape(None)` (representing an unspecified shape), the variable
        can be assigned with values of different shapes.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if not ops.inside_function():
      # If we've been init_scope()d out of the function definition nothing to do
      # here; we can't really do the capturing or conditional logic.
      resource_variable_ops.ResourceVariable.__init__(
          self, initial_value=initial_value, trainable=trainable,
          caching_device=caching_device, name=name, dtype=dtype,
          constraint=constraint)
      return
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, trackable.CheckpointInitialValue):
      self._maybe_initialize_trackable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      assert initial_value is not None

      # Don't use `shape or initial_value.shape` since TensorShape has
      # overridden `__bool__`.
      if shape is None:
        shape = initial_value.shape

      # Use the constructor for UninitializedVariable to start.
      super(UnliftedInitializerVariable, self).__init__(
          trainable=trainable,
          caching_device=caching_device,
          name=name,
          shape=shape,
          dtype=initial_value.dtype,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation,
          extra_handle_data=initial_value,
          **unused_kwargs)

      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
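        # lift_to_graph copies the ops needed to compute initial_value into
        # outer_graph (failing if that computation depends on the function's
        # own placeholders) and returns a map from original tensors to their
        # lifted counterparts.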
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
      else:
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value
        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle,
                initial_value,
                name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)
        def not_assign_fn():
          return ops.convert_to_tensor(0)
        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)
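
The tf.cond at the end of the eager branch above implements one-time
initialization: the assignment only runs if var_is_initialized_op reports the
handle as uninitialized, so re-running the function does not clobber an
existing value. Below is a minimal, self-contained sketch of the same pattern
using the raw resource-variable ops; the init_once helper is hypothetical and
not part of the example above.

# Hypothetical helper sketching the cond-based one-time initialization
# pattern from assign_fn/not_assign_fn above.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops

def init_once(handle, initial_value):
  """Assigns `initial_value` to `handle` only if it is not yet initialized."""
  def assign_fn():
    resource_variable_ops.assign_variable_op(handle, initial_value)
    return ops.convert_to_tensor(1)  # Both cond branches must return a value.
  def not_assign_fn():
    return ops.convert_to_tensor(0)
  # If the variable is already initialized, take the no-op branch.
  return control_flow_ops.cond(
      resource_variable_ops.var_is_initialized_op(handle),
      not_assign_fn, assign_fn)

# Usage (e.g. in graph mode, mirroring the tests above):
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
init_once(handle, constant_op.constant(7, dtype=dtypes.int32))
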
Example #39
    def __init__(
            self,  # pylint: disable=super-init-not-called
            initial_value=None,
            trainable=None,
            caching_device=None,
            name=None,
            dtype=None,
            constraint=None,
            add_initializers_to=None,
            lifted_initializer_graph=None,
            synchronization=None,
            aggregation=None,
            **unused_kwargs):
        """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: If not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
        if not ops.inside_function():
            # If we've been init_scope()d out of the function definition, there's
            # nothing to do here; we can't really do the capturing or conditional
            # logic.
            resource_variable_ops.ResourceVariable.__init__(
                self,
                initial_value=initial_value,
                trainable=trainable,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint)
            return
        with ops.init_scope():
            self._in_graph_mode = not context.executing_eagerly()
        if initial_value is None:
            raise ValueError("initial_value must be specified.")
        init_from_fn = callable(initial_value)

        if constraint is not None and not callable(constraint):
            raise ValueError("The `constraint` argument must be a callable.")

        if isinstance(initial_value, trackable.CheckpointInitialValue):
            self._maybe_initialize_trackable()
            self._update_uid = initial_value.checkpoint_position.restore_uid
            initial_value = initial_value.wrapped_value

        synchronization, aggregation, trainable = (
            variables.validate_synchronization_aggregation_trainable(
                synchronization, aggregation, trainable, name))
        self._trainable = trainable
        self._synchronization = synchronization
        self._aggregation = aggregation
        self._save_slice_info = None
        self._initial_value = None
        self._initializer_op = None
        self._is_initialized_op = None
        self._graph_element = None
        self._cached_value = None
        # Store the graph key so optimizers know how to only retrieve variables from
        # this graph. Guaranteed to be the same as the eager graph_key.
        self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
        with ops.name_scope(name, "Variable",
                            [] if init_from_fn else [initial_value]) as name:
            # pylint: disable=protected-access
            with ops.init_scope():
                handle_name = ops.name_from_scope_name(name)
                unique_id = "%s_%d" % (handle_name, ops.uid())
                shared_name = context.shared_name(unique_id)
            with ops.name_scope("Initializer"), ops.device(None):
                initial_value = ops.convert_to_tensor(
                    initial_value() if init_from_fn else initial_value,
                    name="initial_value",
                    dtype=dtype)
            with ops.init_scope():
                self._handle = resource_variable_ops.eager_safe_variable_handle(
                    initial_value=initial_value,
                    shared_name=shared_name,
                    name=name,
                    graph_mode=self._in_graph_mode)
            self._shape = initial_value.shape
            self._unique_id = unique_id
            self._handle_name = handle_name + ":0"
            self._dtype = initial_value.dtype.base_dtype
            self._constraint = constraint
            assert initial_value is not None
            if self._in_graph_mode:
                with ops.init_scope():
                    outer_graph = ops.get_default_graph()
                func_graph = ops.get_default_graph()
                function_placeholders = (func_graph.inputs +
                                         func_graph.internal_captures)
                placeholder_ops = set(
                    [tensor.op for tensor in function_placeholders])
                lifted_initializer = lift_to_graph.lift_to_graph(
                    [initial_value],
                    outer_graph,
                    disallowed_placeholders=placeholder_ops)[initial_value]
                with ops.init_scope():
                    self._initial_value = lifted_initializer
                    with ops.name_scope("IsInitialized"):
                        self._is_initialized_op = (
                            resource_variable_ops.var_is_initialized_op(
                                self._handle))
                    if initial_value is not None:
                        with ops.name_scope("Assign") as n, ops.colocate_with(
                                self._handle):
                            self._initializer_op = resource_variable_ops.assign_variable_op(
                                self._handle, lifted_initializer, name=n)
                    with ops.name_scope("Read"), ops.colocate_with(
                            self._handle):
                        # Manually assign reads to the handle's device to avoid log
                        # messages.
                        with ops.device(self._handle.device):
                            value = self._read_variable_op()
                        self._graph_element = value
                    ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
            else:
                if add_initializers_to is not None:
                    add_initializers_to[self] = initial_value

                def assign_fn():
                    with ops.name_scope("Assign") as n, ops.colocate_with(
                            self._handle):
                        resource_variable_ops.assign_variable_op(self._handle,
                                                                 initial_value,
                                                                 name=n)
                        # Returning values to keep tf.cond happy.
                    return ops.convert_to_tensor(1)

                def not_assign_fn():
                    return ops.convert_to_tensor(0)

                # Note: this cond is always guaranteed to run because we're inside a
                # defun which will insert automatic control dependencies.
                control_flow_ops.cond(
                    resource_variable_ops.var_is_initialized_op(self._handle),
                    not_assign_fn, assign_fn)

        # After the handle has been created, set up a way to clean it up when
        # executing eagerly. We'll hold the only reference to the deleter, so that
        # when this object is garbage collected the deleter will be too. This
        # means ResourceVariables can be part of reference cycles without those
        # cycles being uncollectable.
        if not self._in_graph_mode:
            self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._handle, handle_device=self._handle.device)
        self._cached_shape_as_list = None
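
The closing comment describes a lifecycle idiom worth noting: because the
object holds the only reference to the EagerResourceDeleter, the underlying
resource is destroyed exactly when the Python wrapper is garbage collected,
even if the wrapper sits inside a reference cycle. A minimal sketch of that
idiom follows; the TinyVar class is hypothetical and assumes eager execution
on a TF version that exposes EagerResourceDeleter.

# Hypothetical TinyVar class (assumes eager execution and a TF version that
# exposes EagerResourceDeleter) sketching GC-driven resource cleanup.
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resource_variable_ops

class TinyVar(object):

  def __init__(self, value):
    self._handle = resource_variable_ops.var_handle_op(
        dtype=dtypes.int32, shape=[], shared_name=context.shared_name())
    resource_variable_ops.assign_variable_op(
        self._handle, constant_op.constant(value, dtype=dtypes.int32))
    # Holding the only reference to the deleter ties the resource's lifetime
    # to this object: when TinyVar is garbage collected, so is the deleter,
    # which frees the resource.
    self._deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._handle, handle_device=self._handle.device)

  def read(self):
    return resource_variable_ops.read_variable_op(
        self._handle, dtype=dtypes.int32)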