Example #1
def test_sparse_repeated_indices(dtype):
    # TODO: remove the tf.device context once placement on CPU is enforced.
    with tf.device("CPU:0"):
        repeated_index_update_var = tf.Variable([[1], [2]], dtype=dtype)
        aggregated_update_var = tf.Variable([[1], [2]], dtype=dtype)
        grad_repeated_index = tf.IndexedSlices(
            tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
            tf.constant([1, 1]),
            tf.constant([2, 1]),
        )
        grad_aggregated = tf.IndexedSlices(
            tf.constant([0.2], shape=[1, 1], dtype=dtype),
            tf.constant([1]),
            tf.constant([2, 1]),
        )
        repeated_update_opt = lazy_adam.LazyAdam()
        aggregated_update_opt = lazy_adam.LazyAdam()
        for _ in range(3):
            repeated_update_opt.apply_gradients(
                [(grad_repeated_index, repeated_index_update_var)]
            )
            aggregated_update_opt.apply_gradients(
                [(grad_aggregated, aggregated_update_var)]
            )
            np.testing.assert_allclose(
                aggregated_update_var.numpy(), repeated_index_update_var.numpy()
            )
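These snippets are extracted from a LazyAdam test module and omit its shared imports and helpers. A minimal sketch of the setup they assume (module paths as in TensorFlow Addons; get_beta_accumulators, adam_update_numpy, and test_utils.assert_allclose_according_to_type are helpers local to that test module, sketched after Examples #3 and #4):

# Shared setup assumed by the examples on this page (a sketch; verify the
# exact paths against the test module itself).
import numpy as np
import tensorflow as tf

from tensorflow_addons.optimizers import lazy_adam
from tensorflow_addons.utils import test_utils  # assumed source of assert_allclose_according_to_type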
Example #2
def testSparseRepeatedIndices(self):
    for dtype in [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]:
        with self.cached_session():
            repeated_index_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
            aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
            grad_repeated_index = tf.IndexedSlices(
                tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
                tf.constant([1, 1]),
                tf.constant([2, 1]),
            )
            grad_aggregated = tf.IndexedSlices(
                tf.constant([0.2], shape=[1, 1], dtype=dtype),
                tf.constant([1]),
                tf.constant([2, 1]),
            )
            repeated_update_opt = lazy_adam.LazyAdam()
            repeated_update = repeated_update_opt.apply_gradients([
                (grad_repeated_index, repeated_index_update_var)
            ])
            aggregated_update_opt = lazy_adam.LazyAdam()
            aggregated_update = aggregated_update_opt.apply_gradients([
                (grad_aggregated, aggregated_update_var)
            ])
            self.evaluate(tf.compat.v1.global_variables_initializer())
            self.assertAllClose(aggregated_update_var.eval(),
                                repeated_index_update_var.eval())
            for _ in range(3):
                repeated_update.run()
                aggregated_update.run()
                self.assertAllClose(aggregated_update_var.eval(),
                                    repeated_index_update_var.eval())
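Both versions of this test assert the same invariant: a sparse gradient that lists index 1 twice with value 0.1 must leave the variable in the same state as a single aggregated gradient of 0.2 at index 1, i.e. repeated indices accumulate. Stated in plain numpy (an illustration, not part of the test module):

# Repeated indices in a sparse update accumulate: [0.1, 0.1] at rows [1, 1]
# is equivalent to a single 0.2 at row 1.
dense = np.zeros((2, 1))
np.add.at(dense, [1, 1], [[0.1], [0.1]])
np.testing.assert_allclose(dense, [[0.0], [0.2]])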
Example #3
def test_sharing(dtype):
    # Initialize values for the numpy reference implementation.
    m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
    var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
    grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
    var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
    grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

    var0 = tf.Variable(var0_np)
    var1 = tf.Variable(var1_np)
    grads0 = tf.constant(grads0_np)
    grads1 = tf.constant(grads1_np)
    opt = lazy_adam.LazyAdam()

    # Fetch params to validate initial values
    np.testing.assert_allclose([1.0, 2.0], var0.numpy())
    np.testing.assert_allclose([3.0, 4.0], var1.numpy())

    # Run 3 steps with a single shared optimizer instance.
    for t in range(3):
        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        test_utils.assert_allclose_according_to_type(0.9 ** (t + 1), beta_1_power)
        test_utils.assert_allclose_according_to_type(0.999 ** (t + 1), beta_2_power)
        opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

        var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
        var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

        # Validate updated params
        test_utils.assert_allclose_according_to_type(var0_np, var0.numpy())
        test_utils.assert_allclose_according_to_type(var1_np, var1.numpy())
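Several examples validate against adam_update_numpy, a reference helper defined in the test module but not shown here. A plausible sketch, assuming the standard Adam recurrences with the Keras defaults (learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7); the real helper may differ in signature or defaults:

def adam_update_numpy(param, g_t, t, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
    # Sketch (assumption): one Adam step in numpy, with t counted from 0.
    lr_t = lr * np.sqrt(1 - beta2 ** (t + 1)) / (1 - beta1 ** (t + 1))
    # Exponential moving averages of the gradient and its square.
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t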
Example #4
def test_basic(use_callable_params, dtype):
    # Initialize values for the numpy reference implementation.
    m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
    var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
    grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
    var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
    grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

    var0 = tf.Variable(var0_np)
    var1 = tf.Variable(var1_np)
    grads0 = tf.constant(grads0_np)
    grads1 = tf.constant(grads1_np)

    def learning_rate():
        return 0.001

    if not use_callable_params:
        learning_rate = learning_rate()

    opt = lazy_adam.LazyAdam(learning_rate=learning_rate)

    # Run 3 steps of Adam
    for t in range(3):
        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        test_utils.assert_allclose_according_to_type(0.9 ** (t + 1), beta_1_power)
        test_utils.assert_allclose_according_to_type(0.999 ** (t + 1), beta_2_power)
        opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

        var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
        var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

        # Validate updated params
        test_utils.assert_allclose_according_to_type(var0_np, var0.numpy())
        test_utils.assert_allclose_according_to_type(var1_np, var1.numpy())
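get_beta_accumulators is likewise a test-module helper. A minimal sketch, under the assumption that it reconstructs the bias-correction powers beta_1^t and beta_2^t from the optimizer's hyperparameters and iteration counter (it leans on the Keras OptimizerV2 internal _get_hyper, so treat it as illustrative only):

def get_beta_accumulators(opt, dtype):
    # Sketch (assumption): beta powers for the upcoming step, i.e. the values
    # used for bias correction after `opt.iterations` completed steps.
    local_step = tf.cast(opt.iterations + 1, dtype)
    beta_1_t = tf.cast(opt._get_hyper("beta_1"), dtype)
    beta_2_t = tf.cast(opt._get_hyper("beta_2"), dtype)
    return tf.pow(beta_1_t, local_step), tf.pow(beta_2_t, local_step)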
Example #5
def test_slots_unique_eager():
    v1 = tf.Variable(1.0)
    v2 = tf.Variable(1.0)
    opt = lazy_adam.LazyAdam(1.0)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # Expect the iteration counter plus two slot variables (m and v) for each
    # of v1 and v2, i.e. 5 variables in total.
    assert 5 == len(opt.variables())
    assert opt.variables()[0] == opt.iterations
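As a hypothetical follow-up (not part of the original test), the slots behind that count can be listed by name; LazyAdam, like Adam, keeps an m and a v slot per trainable variable:

# Hypothetical follow-up: the 5 variables are the iterations counter plus an
# "m" and a "v" slot for each of v1 and v2.
for var in (v1, v2):
    print(opt.get_slot(var, "m").name, opt.get_slot(var, "v").name)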
Example #6
def testSlotsUniqueEager(self):
    v1 = tf.Variable(1.0)
    v2 = tf.Variable(1.0)
    opt = lazy_adam.LazyAdam(1.0)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # Expect the iteration counter plus two slot variables (m and v) for each
    # of v1 and v2, i.e. 5 variables in total.
    self.assertEqual(5, len(opt.variables()))
    self.assertEqual(self.evaluate(opt.variables()[0]),
                     self.evaluate(opt.iterations))
Example #7
    def doTestBasic(self, use_callable_params=False):
        for i, dtype in enumerate(
            [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]):
            with self.session(graph=tf.Graph()):
                # Initialize values for the numpy reference implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = tf.Variable(var0_np, name="var0_%d" % i)
                var1 = tf.Variable(var1_np, name="var1_%d" % i)
                grads0 = tf.constant(grads0_np)
                grads1 = tf.constant(grads1_np)

                def learning_rate():
                    return 0.001

                if not use_callable_params:
                    learning_rate = learning_rate()

                opt = lazy_adam.LazyAdam(learning_rate=learning_rate)
                if not tf.executing_eagerly():
                    update = opt.apply_gradients(
                        zip([grads0, grads1], [var0, var1]))
                    self.evaluate(tf.compat.v1.global_variables_initializer())
                    # Fetch params to validate initial values
                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                # Run 3 steps of Adam
                for t in range(3):
                    beta_1_power, beta_2_power = get_beta_accumulators(
                        opt, dtype)
                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta_1_power))
                    self.assertAllCloseAccordingToType(
                        0.999**(t + 1), self.evaluate(beta_2_power))
                    if not tf.executing_eagerly():
                        self.evaluate(update)
                    else:
                        opt.apply_gradients(zip([grads0, grads1],
                                                [var0, var1]))

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
                    self.assertEqual("LazyAdam/var0_%d/m:0" % (i, ),
                                     opt.get_slot(var0, "m").name)
Example #8
    def testSparse(self):
        for dtype in [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]:
            with self.cached_session():
                # Initialize values for the numpy reference implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.0, 0.1],
                                     dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.0, 0.01],
                                     dtype=dtype.as_numpy_dtype)

                var0 = tf.Variable(var0_np)
                var1 = tf.Variable(var1_np)
                grads0_np_indices = np.array([0, 2], dtype=np.int32)
                grads0 = tf.IndexedSlices(
                    tf.constant(grads0_np[grads0_np_indices]),
                    tf.constant(grads0_np_indices),
                    tf.constant([3]),
                )
                grads1_np_indices = np.array([0, 2], dtype=np.int32)
                grads1 = tf.IndexedSlices(
                    tf.constant(grads1_np[grads1_np_indices]),
                    tf.constant(grads1_np_indices),
                    tf.constant([3]),
                )
                opt = lazy_adam.LazyAdam()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))

                beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
                # Run 3 steps of Adam
                for t in range(3):
                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta_1_power))
                    self.assertAllCloseAccordingToType(
                        0.999**(t + 1), self.evaluate(beta_2_power))
                    self.evaluate(update)

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
Example #9
def testSparseDevicePlacement(self):
    for index_dtype in [tf.dtypes.int32, tf.dtypes.int64]:
        with self.cached_session(force_gpu=tf.test.is_gpu_available()):
            # If a GPU is available, tests that all optimizer ops can be placed on
            # it (i.e. they have GPU kernels).
            var = tf.Variable([[1.0], [2.0]])
            indices = tf.constant([0, 1], dtype=index_dtype)
            g_sum = lambda: tf.math.reduce_sum(tf.gather(var, indices))  # pylint: disable=cell-var-from-loop
            optimizer = lazy_adam.LazyAdam(3.0)
            minimize_op = optimizer.minimize(g_sum, var_list=[var])
            self.evaluate(tf.compat.v1.global_variables_initializer())
            self.evaluate(minimize_op)
Example #10
def test_sparse_device_placement(dtype):

    # If a GPU is available, tests that all optimizer ops can be placed on
    # it (i.e. they have GPU kernels).
    var = tf.Variable([[1.0], [2.0]])
    indices = tf.constant([0, 1], dtype=dtype)

    def g_sum():
        return tf.math.reduce_sum(tf.gather(var, indices))

    optimizer = lazy_adam.LazyAdam(3.0)
    optimizer.minimize(g_sum, var_list=[var])
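These device-placement tests, like the earlier sparse tests, rely on LazyAdam's defining behavior: when the gradient is an IndexedSlices, only the rows named by its indices are updated, and the moment slots for all other rows are left untouched. A small standalone illustration of that behavior in eager mode (not part of the test module):

# Standalone illustration: only row 1 of `var` is updated, because the
# IndexedSlices gradient names only index 1; rows 0 and 2 keep their values.
var = tf.Variable([[1.0], [2.0], [3.0]])
grad = tf.IndexedSlices(
    tf.constant([[0.5]]),  # gradient values for the listed rows
    tf.constant([1]),      # row indices present in the gradient
    tf.constant([3, 1]),   # dense shape of `var`
)
opt = lazy_adam.LazyAdam(learning_rate=0.1)
opt.apply_gradients([(grad, var)])
print(var.numpy())  # rows 0 and 2 are still 1.0 and 3.0; only row 1 moved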
Example #11
    def testSharing(self):
        for dtype in [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]:
            with self.cached_session():
                # Initialize values for the numpy reference implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = tf.Variable(var0_np)
                var1 = tf.Variable(var1_np)
                grads0 = tf.constant(grads0_np)
                grads1 = tf.constant(grads1_np)
                opt = lazy_adam.LazyAdam()
                update1 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                update2 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(tf.compat.v1.global_variables_initializer())

                beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                # Run 3 steps, alternating between the two update ops built
                # from the shared optimizer instance.
                for t in range(3):
                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta_1_power))
                    self.assertAllCloseAccordingToType(
                        0.999**(t + 1), self.evaluate(beta_2_power))
                    if t % 2 == 0:
                        update1.run()
                    else:
                        update2.run()

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
Example #12
def test_serialization():
    optimizer = lazy_adam.LazyAdam()
    config = tf.keras.optimizers.serialize(optimizer)
    new_optimizer = tf.keras.optimizers.deserialize(config)
    assert new_optimizer.get_config() == optimizer.get_config()