def testIdentityOnVariable(self):
   """identity() of a bool resource variable preserves its value."""
   with self.test_scope():
     source = resource_variable_ops.ResourceVariable(True)
     copied = array_ops.identity(source)
   self.assertAllEqual(True, copied.numpy())
Example #2
0
 def model():
     """Create a variable named "same_name" and return its value plus one."""
     var = resource_variable_ops.ResourceVariable(
         name="same_name", initial_value=1)
     return var + 1
 def benchmark_tf_transpose_variable_2_by_2_GPU(self):
   """Benchmark transposing a 2x2 resource variable placed on the GPU."""
   with context.device(GPU):
     matrix = resource_variable_ops.ResourceVariable(self._m_2_by_2)
     self._benchmark_transpose(matrix, num_iters=self._num_iters_2_by_2)
 def testScatterSubStateOps(self):
     """state_ops.scatter_sub subtracts in place at the given index."""
     with context.eager_mode():
         var = resource_variable_ops.ResourceVariable([1.0, 2.0], name="sub")
         state_ops.scatter_sub(var, [1], [3])
         # Index 1: 2.0 - 3 == -1.0; index 0 is untouched.
         self.assertAllEqual([1.0, -1.0], var.numpy())
 def testAssignIncompatibleShape(self):
     """assign_add with a shape-incompatible value must raise.

     The regex deliberately matches both "Shapes must be equal" and
     "shapes must be equal" by omitting the first letter.
     """
     v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
     self.evaluate(v.initializer)
     # assertRaisesRegexp is deprecated (removed in Python 3.12); use
     # assertRaisesRegex, matching the other tests in this file.
     with self.assertRaisesRegex(Exception, r"hapes must be equal"):
         self.assertAllEqual(self.evaluate(v.assign_add(1)), [1, 2, 3, 4])
 def testDtypeAfterFromProto(self):
     """A to_proto/from_proto round trip keeps the variable's dtype."""
     original = resource_variable_ops.ResourceVariable(2.0)
     restored = resource_variable_ops.ResourceVariable.from_proto(
         original.to_proto())
     self.assertIsInstance(restored.dtype, dtypes.DType)
     self.assertEqual(original.dtype, restored.dtype)
 def testEagerNameNotNeeded(self):
     """An eager variable created without a name is still fully usable."""
     with context.eager_mode():
         unnamed = resource_variable_ops.ResourceVariable(1.0)
         self.assertAllEqual(unnamed.numpy(), 1.0)
 def testDifferentAssignGraph(self):
     """Assigning to a variable still works after the default graph is reset.

     The variable is created inside an explicit graph; the assign happens
     after reset_default_graph(), so it must not depend on the (now replaced)
     default graph.
     """
     with ops.Graph().as_default():
         v = resource_variable_ops.ResourceVariable(1.0)
     ops.reset_default_graph()
     v.assign(
         2.0)  # NOTE(review): original comment was truncated here; it warns
     # that this fails if convert_to_tensor runs against a graph other than
     # the variable's own — confirm against the upstream test.
 def testUnreadOpName(self):
     """The assign_add op gets a name distinct from the variable's name."""
     var = resource_variable_ops.ResourceVariable(1.0)
     assign_result = var.assign_add(1.0)
     self.assertNotEqual(var.name, assign_result.name)
 def testEagerBool(self):
     """bool() on a False-valued eager resource variable yields False."""
     with context.eager_mode():
         flag = resource_variable_ops.ResourceVariable(False, name="bool_test")
         self.assertAllEqual(bool(flag), False)
 def testStridedSliceAssign(self):
     """Assigning through a strided slice updates only the sliced element."""
     var = resource_variable_ops.ResourceVariable([1.0, 2.0])
     self.evaluate(variables.global_variables_initializer())
     self.evaluate(var[0].assign(2.0))
     # Element 0 is overwritten; element 1 keeps its initial value.
     self.assertAllEqual(self.evaluate(var), [2.0, 2.0])
 def testEagerInitializedValue(self):
     """initialized_value() agrees with numpy() for an eager variable."""
     with context.eager_mode():
         variable = resource_variable_ops.ResourceVariable(
             1.0, name="eager-init")
         self.assertAllEqual(variable.numpy(), 1.0)
         # NOTE(review): initialized_value() is a legacy graph-mode API; this
         # deliberately checks it also works under eager execution.
         self.assertAllEqual(variable.initialized_value().numpy(), 1.0)
Example #13
0
    def testAddVariable(self):
        """Exercises trackable_utils.add_variable naming and initialization.

        Covers: rejecting a shape given alongside a constant initializer,
        constant / ones / bare-class initializers, duplicate-name detection
        within one object (but not across objects), and the mapping from
        variables to checkpoint attribute names.
        """
        obj = NonLayerTrackable()
        # Giving both a scalar constant initializer and an explicit shape is
        # redundant and must be rejected.
        with self.assertRaisesRegex(ValueError, "do not specify shape"):
            trackable_utils.add_variable(obj,
                                         name="shape_specified_twice",
                                         shape=[],
                                         initializer=1)
        constant_initializer = trackable_utils.add_variable(
            obj, name="constant_initializer", initializer=1)
        with variable_scope.variable_scope("some_variable_scope"):
            ones_initializer = trackable_utils.add_variable(
                obj,
                name="ones_initializer",
                shape=[2],
                initializer=init_ops.ones_initializer(dtype=dtypes.float32))
        # Initializer passed as the class itself (not an instance).
        bare_initializer = trackable_utils.add_variable(
            obj,
            name="bare_initializer",
            shape=[2, 2],
            dtype=dtypes.float64,
            initializer=init_ops.zeros_initializer)

        # Even in graph mode, there are no naming conflicts between objects, only
        # naming conflicts within an object.
        other_duplicate = resource_variable_ops.ResourceVariable(
            name="duplicate", initial_value=1.)
        duplicate = trackable_utils.add_variable(obj,
                                                 name="duplicate",
                                                 shape=[])
        # Re-adding the same name on the same object is an error.
        with self.assertRaisesRegex(ValueError,
                                    "'duplicate'.*already declared"):
            trackable_utils.add_variable(obj, name="duplicate", shape=[])

        self.evaluate(trackable_utils.gather_initializers(obj))
        self.assertEqual("constant_initializer:0", constant_initializer.name)
        self.assertEqual(1, self.evaluate(constant_initializer))
        self.assertEqual("some_variable_scope/ones_initializer:0",
                         ones_initializer.name)
        self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
        self.assertAllEqual([[0., 0.], [0., 0.]],
                            self.evaluate(bare_initializer))
        self.assertEqual("a_variable:0", obj.a_variable.name)
        self.assertEqual("duplicate:0", other_duplicate.name)
        if context.executing_eagerly():
            # When executing eagerly, there's no uniquification of variable names. The
            # checkpoint name will be the same.
            self.assertEqual("duplicate:0", duplicate.name)
        else:
            # The .name attribute may be globally influenced, but the checkpoint name
            # won't be (tested below).
            self.assertEqual("duplicate_1:0", duplicate.name)
        named_variables, _, _ = (
            graph_view.ObjectGraphView(obj).serialize_object_graph())
        # Checkpoint names come from the object graph, not variable .name,
        # so "duplicate" appears exactly once and without uniquification.
        expected_checkpoint_names = (
            "a_variable/.ATTRIBUTES/VARIABLE_VALUE",
            "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
            "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
            "duplicate/.ATTRIBUTES/VARIABLE_VALUE",
            "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
        )
        six.assertCountEqual(self, expected_checkpoint_names,
                             [v.name for v in named_variables])
 def testAssignAddVariable(self):
   """assign_add increments the variable's stored value in place."""
   with self.test_scope():
     var = resource_variable_ops.ResourceVariable(1.0)
     var.assign_add(2.0)
   self.assertEqual(3.0, var.numpy())
 def testLoad(self):
     """load() overwrites the variable's current value."""
     var = resource_variable_ops.ResourceVariable(1.0, name="var0")
     self.evaluate(variables.global_variables_initializer())
     var.load(2.0)
     self.assertEqual(2.0, self.evaluate(var.value()))
 def testInitFn(self):
     """A callable initial_value is colocated with the variable handle."""
     with self.cached_session():
         v = resource_variable_ops.ResourceVariable(initial_value=lambda: 1,
                                                    dtype=dtypes.float32)
         # initializer.inputs[1] is presumably the initial-value tensor fed to
         # the assign op — TODO confirm; it must share a colocation group with
         # the variable's handle op.
         self.assertEqual(v.handle.op.colocation_groups(),
                          v.initializer.inputs[1].op.colocation_groups())
 def testGPUInt64(self):
     """An int64 resource variable can be created and read on a GPU."""
     available_gpus = context.context().num_gpus()
     if not available_gpus:
         # Nothing to test on a machine without GPUs.
         return
     with context.eager_mode(), context.device("gpu:0"):
         var = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int64)
         self.assertAllEqual(1, var.numpy())
 def testHandleNumpy(self):
     """Calling numpy() on a variable's raw handle raises ValueError."""
     with context.eager_mode():
         var = resource_variable_ops.ResourceVariable(
             1.0, name="handle-numpy")
         with self.assertRaises(ValueError):
             var.handle.numpy()
 def testEagerNameNotIdentity(self):
     """Two eager variables may share a name yet hold distinct values."""
     with context.eager_mode():
         first = resource_variable_ops.ResourceVariable(1.0, name="a")
         second = resource_variable_ops.ResourceVariable(2.0, name="a")
         self.assertAllEqual(first.numpy(), 1.0)
         self.assertAllEqual(second.numpy(), 2.0)
 def testCountUpToFunction(self):
     """count_up_to returns the pre-increment value, then raises at the limit."""
     with context.eager_mode():
         counter = resource_variable_ops.ResourceVariable(0, name="upto")
         # First call returns 0 and bumps the counter to the limit of 1.
         self.assertAllEqual(state_ops.count_up_to(counter, 1), 0)
         with self.assertRaises(errors.OutOfRangeError):
             state_ops.count_up_to(counter, 1)
 def body(i, _):
     """Loop body: create a fresh zero variable and advance the counter."""
     zero_scalar = array_ops.zeros([], dtype=dtypes.int32)
     fresh_var = resource_variable_ops.ResourceVariable(
         initial_value=zero_scalar)
     return (i + 1, fresh_var.read_value())
 def testInitFnDtype(self):
     """An explicit dtype overrides the callable initializer's int value."""
     var = resource_variable_ops.ResourceVariable(
         initial_value=lambda: 1, dtype=dtypes.float32, name="var0")
     self.assertEqual(dtypes.float32, var.value().dtype)
 def testScatterUpdateCast(self):
     """scatter_update writes (casting the int) at the given index."""
     with context.eager_mode():
         var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                      name="update")
         state_ops.scatter_update(var, [1], [3])
         self.assertAllEqual([1.0, 3.0], var.numpy())
 def testInitFnNoDtype(self):
     """Without an explicit dtype, int32 is inferred from the initializer."""
     var = resource_variable_ops.ResourceVariable(
         initial_value=lambda: 1, name="var2")
     self.assertEqual(dtypes.int32, var.value().dtype)
 def __init__(self):
     """Hold a single float resource variable as this object's state."""
     self.v = resource_variable_ops.ResourceVariable(1.0)
 def testInitializeAllVariables(self):
     """global_variables_initializer sets the variable to its initial 1.0."""
     var = resource_variable_ops.ResourceVariable(
         1, dtype=dtypes.float32, name="var0")
     self.evaluate(variables.global_variables_initializer())
     self.assertEqual(1.0, self.evaluate(var.value()))
 def benchmark_tf_zeros_like_variable_GPU(self):
   """Benchmark zeros_like over a 2x2 resource variable on the GPU."""
   matrix = resource_variable_ops.ResourceVariable(self._m_2_by_2)
   self._benchmark_tf_zeros_like(matrix, device=GPU)
 def testOperatorOverload(self):
     """The overloaded + operator reads the variable on both sides."""
     var = resource_variable_ops.ResourceVariable(1.0, name="var0")
     self.evaluate(variables.global_variables_initializer())
     self.assertEqual(2.0, self.evaluate(var + var))
 def benchmark_read_variable_op_2_by_2_CPU(self):
   """Benchmark reading a 2x2 resource variable placed on the CPU."""
   with context.device(CPU):
     matrix = resource_variable_ops.ResourceVariable(self._m_2_by_2)
     self._benchmark_read_variable(matrix, num_iters=self._num_iters_2_by_2)
Example #30
0
 def testGradientTapeVariable(self):
     """GradientTape auto-watches variables: d(v*v)/dv at v=1 is 2."""
     var = resource_variable_ops.ResourceVariable(1.0, name='v')
     with backprop.GradientTape() as tape:
         squared = var * var
     grad = tape.gradient(squared, [var])[0]
     self.assertAllEqual(grad, 2.0)