Example #1
    def testSparse(self, use_resource):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with self.cached_session():
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    global_step = resource_variable_ops.ResourceVariable(
                        array_ops.zeros([], dtypes.int64))
                    var0 = resource_variable_ops.ResourceVariable(var0_np)
                    var1 = resource_variable_ops.ResourceVariable(var1_np)
                else:
                    global_step = variables.Variable(
                        array_ops.zeros([], dtypes.int64))
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)

                grads0_np_indices = np.array([0, 1], dtype=np.int32)
                grads0 = ops.IndexedSlices(
                    constant_op.constant(grads0_np),
                    constant_op.constant(grads0_np_indices),
                    constant_op.constant([2]))
                grads1_np_indices = np.array([0, 1], dtype=np.int32)
                grads1 = ops.IndexedSlices(
                    constant_op.constant(grads1_np),
                    constant_op.constant(grads1_np_indices),
                    constant_op.constant([2]))
                opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=global_step)
                update = opt.apply_gradients(zip([grads0, grads1],
                                                 [var0, var1]),
                                             global_step=global_step)
                variables.global_variables_initializer().run()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], var0.eval())
                self.assertAllClose([3.0, 4.0], var1.eval())

                beta1_power, beta2_power = opt._get_beta_accumulators()

                # Run 3 steps of Adam
                for t in range(1, 4):
                    self.assertAllCloseAccordingToType(0.9**t,
                                                       beta1_power.eval())
                    self.assertAllCloseAccordingToType(0.999**t,
                                                       beta2_power.eval())
                    update.run()

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np, var0.eval())
                    self.assertAllCloseAccordingToType(var1_np, var1.eval())
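
The tests above compare against a NumPy reference implementation, adam_update_numpy, which is not included in this excerpt. The sketch below is a minimal guess at such a helper, assuming the standard Adam update with a bias-corrected step size and the usual defaults (alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8); the real helper in the test file may differ.

import numpy as np


def adam_update_numpy(param, g_t, t, m, v,
                      alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    # Bias-corrected step size for (1-indexed) step t.
    alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
    # Exponential moving averages of the gradient and its square.
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    # Updated parameters.
    param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t
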
Example #2
    def testSparseRepeatedIndices(self, use_resource):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with self.cached_session():
                if use_resource:
                    repeated_index_global_step = resource_variable_ops.ResourceVariable(
                        array_ops.zeros([], dtypes.int64))
                    aggregated_global_step = resource_variable_ops.ResourceVariable(
                        array_ops.zeros([], dtypes.int64))
                    repeated_index_update_var = resource_variable_ops.ResourceVariable(
                        [[1.0], [2.0]], dtype=dtype)
                    aggregated_update_var = resource_variable_ops.ResourceVariable(
                        [[1.0], [2.0]], dtype=dtype)
                else:
                    repeated_index_global_step = variables.Variable(
                        array_ops.zeros([], dtypes.int64))
                    aggregated_global_step = variables.Variable(
                        array_ops.zeros([], dtypes.int64))
                    repeated_index_update_var = variables.Variable(
                        [[1.0], [2.0]], dtype=dtype)
                    aggregated_update_var = variables.Variable([[1.0], [2.0]],
                                                               dtype=dtype)

                grad_repeated_index = ops.IndexedSlices(
                    constant_op.constant([0.1, 0.1], shape=[2, 1],
                                         dtype=dtype),
                    constant_op.constant([1, 1]), constant_op.constant([2, 1]))
                grad_aggregated = ops.IndexedSlices(
                    constant_op.constant([0.2], shape=[1, 1], dtype=dtype),
                    constant_op.constant([1]), constant_op.constant([2, 1]))
                repeated_update_opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=repeated_index_global_step)
                repeated_update = repeated_update_opt.apply_gradients(
                    [(grad_repeated_index, repeated_index_update_var)],
                    global_step=repeated_index_global_step)
                aggregated_update_opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=aggregated_global_step)
                aggregated_update = aggregated_update_opt.apply_gradients(
                    [(grad_aggregated, aggregated_update_var)],
                    global_step=aggregated_global_step)
                variables.global_variables_initializer().run()
                self.assertAllClose(aggregated_update_var.eval(),
                                    repeated_index_update_var.eval())
                for _ in range(3):
                    repeated_update.run()
                    aggregated_update.run()
                    self.assertAllClose(aggregated_update_var.eval(),
                                        repeated_index_update_var.eval())
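
What this test checks is that a gradient carrying repeated indices produces the same update as the pre-aggregated gradient, i.e. the two 0.1 contributions to row 1 end up behaving like a single 0.2 entry. A small NumPy illustration of that aggregation (not part of the test):

import numpy as np

values = np.array([[0.1], [0.1]])       # two contributions to the same row
indices = np.array([1, 1])              # both point at row 1
aggregated = np.zeros((2, 1))
np.add.at(aggregated, indices, values)  # duplicates accumulate: row 1 becomes 0.2
print(aggregated)                       # [[0.0], [0.2]]
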
Example #3
    def testSlotsUniqueEager(self):
        with context.eager_mode():
            v1 = resource_variable_ops.ResourceVariable(1.)
            v2 = resource_variable_ops.ResourceVariable(1.)
            opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(1.)
            opt.minimize(lambda: v1 + v2)
            # There should be two unique slot variables (m and v) for each of
            # v1 and v2; the beta-power accumulators are not reported by
            # opt.variables() for this optimizer.
            self.assertLen(set(opt.variables()), 4)
Example #4
    def testSharing(self):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with self.cached_session():
                global_step = variables.Variable(
                    array_ops.zeros([], dtypes.int64))
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = variables.Variable(var0_np)
                var1 = variables.Variable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)
                opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=global_step)
                update1 = opt.apply_gradients(zip([grads0, grads1],
                                                  [var0, var1]),
                                              global_step=global_step)
                update2 = opt.apply_gradients(zip([grads0, grads1],
                                                  [var0, var1]),
                                              global_step=global_step)
                variables.global_variables_initializer().run()

                beta1_power, beta2_power = opt._get_beta_accumulators()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], var0.eval())
                self.assertAllClose([3.0, 4.0], var1.eval())

                # Run 3 steps of Adam, alternating between the two update ops
                # built from the shared optimizer.
                for t in range(1, 4):
                    self.assertAllCloseAccordingToType(0.9**t,
                                                       beta1_power.eval())
                    self.assertAllCloseAccordingToType(0.999**t,
                                                       beta2_power.eval())
                    if t % 2 == 0:
                        update1.run()
                    else:
                        update2.run()

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np, var0.eval())
                    self.assertAllCloseAccordingToType(var1_np, var1.eval())
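
The interleaved updates stay consistent with a single NumPy trace because update1 and update2 are built from the same optimizer instance and therefore share one set of slot variables and beta-power accumulators. As an illustration only (not part of the original test), a check along these lines could be added inside the step loop, assuming the slots follow the standard Adam "m"/"v" accumulators:

                    # Hypothetical extra check: the shared slots for var0 track
                    # the same values as the NumPy reference trace.
                    self.assertAllCloseAccordingToType(
                        m0, opt.get_slot(var0, "m").eval())
                    self.assertAllCloseAccordingToType(
                        v0, opt.get_slot(var0, "v").eval())
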
Example #5
    def testTwoSessions(self):
        optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer()

        with context.eager_mode():
            var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
            grads0 = constant_op.constant(np.array([0.1, 0.1]))
            optimizer.apply_gradients([(grads0, var0)])

        g = ops.Graph()
        with g.as_default():
            with self.session(graph=g):
                var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
                grads0 = constant_op.constant(np.array([0.1, 0.1]))
                optimizer.apply_gradients([(grads0, var0)])

        gg = ops.Graph()
        with gg.as_default():
            with self.session(graph=gg):
                var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
                grads0 = constant_op.constant(np.array([0.1, 0.1]))

                # If the optimizer saves any state that is not keyed by graph,
                # the following line fails.
                optimizer.apply_gradients([(grads0, var0)])
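
The comment above is the crux of this test: reusing one optimizer object across eager mode and two separate graphs only works if its internal bookkeeping is keyed by graph. As a rough sketch of that idea (illustrative only, not the optimizer's actual implementation), non-slot state can be stored in a dictionary keyed by (name, graph):

class _GraphKeyedStateSketch(object):
    """Illustrative only: state created under one graph is invisible to others."""

    def __init__(self):
        self._non_slot_dict = {}  # {(name, graph): variable}

    def _get_non_slot(self, name, graph):
        return self._non_slot_dict.get((name, graph))
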
Example #6
    def testSparseDevicePlacement(self, use_resource):
        for index_dtype in [dtypes.int32, dtypes.int64]:
            with self.cached_session(force_gpu=test.is_gpu_available()):
                # If a GPU is available, tests that all optimizer ops can be placed on
                # it (i.e. they have GPU kernels).
                if use_resource:
                    global_step = resource_variable_ops.ResourceVariable(
                        array_ops.zeros([], dtypes.int64))
                    var = resource_variable_ops.ResourceVariable([[1.0],
                                                                  [2.0]])
                else:
                    global_step = variables.Variable(
                        array_ops.zeros([], dtypes.int64))
                    var = variables.Variable([[1.0], [2.0]])

                indices = constant_op.constant([0, 1], dtype=index_dtype)
                gathered_sum = math_ops.reduce_sum(
                    array_ops.gather(var, indices))
                optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=global_step, learning_rate=3.0)
                minimize_op = optimizer.minimize(gathered_sum,
                                                 global_step=global_step)
                variables.global_variables_initializer().run()
                minimize_op.run()
Example #7
    def doTestBasic(self, use_resource=False, use_callable_params=False):
        for i, dtype in enumerate(
                [dtypes.half, dtypes.float32, dtypes.float64]):
            with self.session(graph=ops.Graph()):
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    global_step = resource_variable_ops.ResourceVariable(
                        array_ops.zeros([], dtypes.int64),
                        name="global_step_%d" % i)
                    var0 = resource_variable_ops.ResourceVariable(
                        var0_np, name="var0_%d" % i)
                    var1 = resource_variable_ops.ResourceVariable(
                        var1_np, name="var1_%d" % i)
                else:
                    global_step = variables.Variable(
                        array_ops.zeros([], dtypes.int64))
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)

                learning_rate = lambda: 0.001
                beta1 = lambda: 0.9
                beta2 = lambda: 0.999
                epsilon = lambda: 1e-8
                if not use_callable_params:
                    learning_rate = learning_rate()
                    beta1 = beta1()
                    beta2 = beta2()
                    epsilon = epsilon()

                opt = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
                    global_step=global_step, learning_rate=learning_rate)
                update = opt.apply_gradients(zip([grads0, grads1],
                                                 [var0, var1]),
                                             global_step=global_step)
                opt_variables = opt.variables()
                beta1_power, beta2_power = opt._get_beta_accumulators()
                self.assertIsNotNone(beta1_power)
                self.assertIsNotNone(beta2_power)
                self.assertNotIn(beta1_power, opt_variables)
                self.assertNotIn(beta2_power, opt_variables)

                if not context.executing_eagerly():
                    with ops.Graph().as_default():
                        # Shouldn't return non-slot variables from other graphs.
                        self.assertEqual(0, len(opt.variables()))
                    self.evaluate(variables.global_variables_initializer())
                    # Fetch params to validate initial values
                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                # Run 3 steps of Adam
                for t in range(1, 4):
                    if not context.executing_eagerly():
                        self.evaluate(update)
                        self.assertAllCloseAccordingToType(
                            0.9**(t + 1), self.evaluate(beta1_power))
                        self.assertAllCloseAccordingToType(
                            0.999**(t + 1), self.evaluate(beta2_power))
                    else:
                        if t > 1:
                            opt.apply_gradients(zip([grads0, grads1],
                                                    [var0, var1]),
                                                global_step=global_step)
                            beta1_power, beta2_power = (
                                opt._get_beta_accumulators())
                            self.assertAllCloseAccordingToType(
                                0.9**t, self.evaluate(beta1_power))
                            self.assertAllCloseAccordingToType(
                                0.999**t, self.evaluate(beta2_power))

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
                    if use_resource:
                        self.assertEqual("var0_%d/Adam:0" % (i, ),
                                         opt.get_slot(var=var0, name="m").name)
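
doTestBasic is a helper rather than a test method; the surrounding class (not shown in this excerpt) would call it from wrapper tests. The wrappers below are hypothetical, with assumed names, mirroring the use_resource and use_callable_params flags:

    def testBasic(self):
        self.doTestBasic(use_resource=False)

    def testResourceBasic(self):
        self.doTestBasic(use_resource=True)

    def testBasicCallableParams(self):
        with context.eager_mode():
            self.doTestBasic(use_resource=True, use_callable_params=True)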