Example #1
 def testSparseRepeatedIndices(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
         with ops.Graph().as_default(), self.cached_session():
             repeated_index_update_var = variables.Variable([[1.0], [2.0]],
                                                            dtype=dtype)
             aggregated_update_var = variables.Variable([[1.0], [2.0]],
                                                        dtype=dtype)
             grad_repeated_index = indexed_slices.IndexedSlices(
                 constant_op.constant([0.1, 0.1], shape=[2, 1],
                                      dtype=dtype),
                 constant_op.constant([1, 1]), constant_op.constant([2, 1]))
             grad_aggregated = indexed_slices.IndexedSlices(
                 constant_op.constant([0.2], shape=[1, 1], dtype=dtype),
                 constant_op.constant([1]), constant_op.constant([2, 1]))
             repeated_update = adamax.Adamax().apply_gradients([
                 (grad_repeated_index, repeated_index_update_var)
             ])
             aggregated_update = adamax.Adamax().apply_gradients([
                 (grad_aggregated, aggregated_update_var)
             ])
             self.evaluate(variables.global_variables_initializer())
             self.assertAllClose(aggregated_update_var,
                                 repeated_index_update_var.eval())
             for _ in range(3):
                 repeated_update.run()
                 aggregated_update.run()
                 self.assertAllClose(aggregated_update_var,
                                     repeated_index_update_var.eval())
Example #2
 def testSparseRepeatedIndices(self):
   for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
     with self.cached_session():
       repeated_index_update_var = variables.Variable(
           [[1.0], [2.0]], dtype=dtype)
       aggregated_update_var = variables.Variable(
           [[1.0], [2.0]], dtype=dtype)
       grad_repeated_index = ops.IndexedSlices(
           constant_op.constant(
               [0.1, 0.1], shape=[2, 1], dtype=dtype),
           constant_op.constant([1, 1]),
           constant_op.constant([2, 1]))
       grad_aggregated = ops.IndexedSlices(
           constant_op.constant(
               [0.2], shape=[1, 1], dtype=dtype),
           constant_op.constant([1]),
           constant_op.constant([2, 1]))
       repeated_update = adamax.Adamax().apply_gradients(
           [(grad_repeated_index, repeated_index_update_var)])
       aggregated_update = adamax.Adamax().apply_gradients(
           [(grad_aggregated, aggregated_update_var)])
       variables.global_variables_initializer().run()
       self.assertAllClose(aggregated_update_var.eval(),
                           repeated_index_update_var.eval())
       for _ in range(3):
         repeated_update.run()
         aggregated_update.run()
         self.assertAllClose(aggregated_update_var.eval(),
                             repeated_index_update_var.eval())
Example #3
 def testConstructAdamaxWithLR(self):
   opt = adamax.Adamax(lr=1.0)
   self.assertEqual(opt.lr, 1.0)
   opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
   self.assertEqual(opt_2.lr, 1.0)
   opt_3 = adamax.Adamax(learning_rate=0.1)
   self.assertEqual(opt_3.lr, 0.1)
Example #4
  def testConstructAdamaxWithEpsilonValues(self):
    opt = adamax.Adamax(epsilon=None)
    config = opt.get_config()
    self.assertEqual(config["epsilon"], 1e-7)

    opt = adamax.Adamax(epsilon=1e-8)
    config = opt.get_config()
    self.assertEqual(config["epsilon"], 1e-8)
Example #5
  def testConstructAdamaxWithLR(self):
    opt = adamax.Adamax(lr=1.0)
    opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
    opt_3 = adamax.Adamax(learning_rate=0.1)
    self.assertIsInstance(opt.lr, variables.Variable)
    self.assertIsInstance(opt_2.lr, variables.Variable)
    self.assertIsInstance(opt_3.lr, variables.Variable)

    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
Example #6
  def testTensorLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.Adamax(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power = get_beta_accumulators(opt, dtype)

        # Run 3 steps of Adamax
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
          update.run()

          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
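Example #6 (and several of the graph-mode tests below) validates the optimizer against a numpy reference, adamax_update_numpy, which is defined elsewhere in the test module and not reproduced on this page. The following is a minimal sketch of what that helper is assumed to compute, following the AdaMax update rule (Kingma & Ba, 2015) with the optimizer's default hyperparameters; the exact constants, notably epsilon, are assumptions:

import numpy as np

def adamax_update_numpy(param, g_t, t, m, v,
                        alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  # t is the zero-based step index, so bias correction uses beta1 ** (t + 1).
  m_t = beta1 * m + (1 - beta1) * g_t
  # v tracks an exponentially weighted infinity norm, not a squared moment.
  v_t = np.maximum(beta2 * v, np.abs(g_t))
  param_t = param - (alpha / (1 - beta1 ** (t + 1))) * (m_t / (v_t + epsilon))
  return param_t, m_t, v_t

Unlike Adam, AdaMax applies no bias correction to v_t, which is why the tests only track a single beta_1 accumulator.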
Example #7
 def testSlotsUniqueEager(self):
     v1 = variables.Variable(1.)
     v2 = variables.Variable(1.)
     opt = adamax.Adamax(1.)
     opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
     # Expect 5 variables: the iterations counter plus two slot
     # variables (m, v) for each of v1 and v2.
     self.assertLen({id(v) for v in opt.variables()}, 5)
Example #8
 def testSlotsUniqueEager(self):
   with context.eager_mode():
     v1 = resource_variable_ops.ResourceVariable(1.)
     v2 = resource_variable_ops.ResourceVariable(1.)
     opt = adamax.Adamax(1.)
     opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # Expect 5 variables: the iterations counter plus two slot
      # variables (m, v) for each of v1 and v2.
     self.assertEqual(5, len(set(opt.variables())))
Example #9
    def testBasic(self):
        for i, dtype in enumerate(
            [dtypes.half, dtypes.float32, dtypes.float64]):
            with self.session(graph=ops.Graph(), use_gpu=True):
                # Initialize variables for numpy implementation.
                m0 = np.array([0.0, 0.0])
                v0 = np.array([0.0, 0.0])
                m1 = np.array([0.0, 0.0])
                v1 = np.array([0.0, 0.0])
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = resource_variable_ops.ResourceVariable(
                    var0_np, name="var0_%d" % i)
                var1 = resource_variable_ops.ResourceVariable(
                    var1_np, name="var1_%d" % i)

                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)

                opt = adamax.Adamax()
                if not context.executing_eagerly():
                    update = opt.apply_gradients(
                        zip([grads0, grads1], [var0, var1]))
                    self.evaluate(variables.global_variables_initializer())
                    # Fetch params to validate initial values
                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                # Run 3 steps of Adamax
                for t in range(3):
                    beta_1_power = get_beta_accumulators(opt, dtype)
                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta_1_power))
                    if not context.executing_eagerly():
                        self.evaluate(update)
                    else:
                        opt.apply_gradients(zip([grads0, grads1],
                                                [var0, var1]))

                    var0_np, m0, v0 = adamax_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0),
                                                       rtol=1e-2)
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1),
                                                       rtol=1e-2)
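get_beta_accumulators, used by Examples #6, #9, #11 and #12 to check beta_1 ** (t + 1), is likewise defined in the test module rather than shown here. A plausible sketch, assuming it reads the optimizer's iteration counter and the beta_1 hyperparameter through OptimizerV2 internals (the use of the private _get_hyper helper is an assumption):

from tensorflow.python.ops import math_ops

def get_beta_accumulators(opt, dtype):
  # opt.iterations counts completed steps, so the current step is
  # iterations + 1.
  local_step = math_ops.cast(opt.iterations + 1, dtype)
  beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
  # Only beta_1 ** t is needed: AdaMax has no second-moment bias correction.
  return math_ops.pow(beta_1_t, local_step)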
Example #10
 def testSparseDevicePlacement(self):
   for index_dtype in [dtypes.int32, dtypes.int64]:
     with self.cached_session(force_gpu=test.is_gpu_available()):
       # If a GPU is available, tests that all optimizer ops can be placed on
       # it (i.e. they have GPU kernels).
       var = variables.Variable([[1.0], [2.0]])
       indices = constant_op.constant([0, 1], dtype=index_dtype)
       g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
       optimizer = adamax.Adamax(3.0)
       minimize_op = optimizer.minimize(g_sum, var_list=[var])
       variables.global_variables_initializer().run()
       minimize_op.run()
Example #11
    def testResourceSparse(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                # Initialize variables for numpy implementation.
                zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)  # pylint: disable=cell-var-from-loop
                m0, v0, m1, v1 = (zero_slots(), zero_slots(), zero_slots(),
                                  zero_slots())
                var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = resource_variable_ops.ResourceVariable(var0_np)
                var1 = resource_variable_ops.ResourceVariable(var1_np)

                grads0_np_indices = np.array([0, 1], dtype=np.int32)
                grads0 = ops.IndexedSlices(
                    constant_op.constant(grads0_np),
                    constant_op.constant(grads0_np_indices),
                    constant_op.constant([3]))
                grads1_np_indices = np.array([2, 1], dtype=np.int32)
                grads1 = ops.IndexedSlices(
                    constant_op.constant(grads1_np),
                    constant_op.constant(grads1_np_indices),
                    constant_op.constant([3]))
                opt = adamax.Adamax()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
                self.assertAllClose([4.0, 5.0, 6.0], var1.eval())

                beta1_power = get_beta_accumulators(opt, dtype)

                # Run 3 steps of Adamax
                for t in range(3):
                    self.assertAllCloseAccordingToType(0.9**(t + 1),
                                                       beta1_power.eval())
                    update.run()

                    var0_np, m0, v0 = adamax_sparse_update_numpy(
                        var0_np, grads0_np_indices, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_sparse_update_numpy(
                        var1_np, grads1_np_indices, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np, var0.eval())
                    self.assertAllCloseAccordingToType(var1_np, var1.eval())
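The sparse reference helper adamax_sparse_update_numpy used above is also not shown on this page. A sketch under the assumption that it applies the dense AdaMax step only to the rows addressed by the IndexedSlices indices, leaving the remaining slot and parameter rows untouched:

import numpy as np

def adamax_sparse_update_numpy(param, indices, g_t, t, m, v,
                               alpha=0.001, beta1=0.9, beta2=0.999,
                               epsilon=1e-8):
  m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
  # Dense AdaMax step restricted to the gathered rows.
  m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
  v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
  param_t_slice = param[indices] - (
      (alpha / (1 - beta1 ** (t + 1))) * (m_t_slice / (v_t_slice + epsilon)))
  m_t[indices] = m_t_slice
  v_t[indices] = v_t_slice
  param_t[indices] = param_t_slice
  return param_t, m_t, v_t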
Example #12
    def testSharing(self):
        # TODO(tanzheny, omalleyt): Fix test in eager mode.
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with ops.Graph().as_default(), self.cached_session(use_gpu=True):
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                var0 = variables.Variable(var0_np)
                var1 = variables.Variable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)
                opt = adamax.Adamax()
                update1 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                update2 = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                beta1_power = get_beta_accumulators(opt, dtype)

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], var0.eval())
                self.assertAllClose([3.0, 4.0], var1.eval())

                # Run 3 steps of intertwined Adamax1 and Adamax2.
                for t in range(3):
                    self.assertAllCloseAccordingToType(0.9**(t + 1),
                                                       beta1_power.eval())
                    if t % 2 == 0:
                        update1.run()
                    else:
                        update2.run()

                    var0_np, m0, v0 = adamax_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np, var0.eval())
                    self.assertAllCloseAccordingToType(var1_np, var1.eval())
Example #13
adam_optimizer_v1_fn = combinations.NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = combinations.NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))

# TODO(shiningsun): consider adding the other v1 optimizers
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

adadelta_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001))
adagrad_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
adamax_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))
nadam_optimizer_keras_v2_fn = combinations.NamedObject(
    "NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))
ftrl_optimizer_keras_v2_fn = combinations.NamedObject(
    "FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001))
gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
rmsprop_optimizer_keras_v2_fn = combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# TODO(shiningsun): consider adding the other v2 optimizers
optimizers_v2 = [
    gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn
]

optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2
Example #14
 def testAdamaxCompatibility(self):
     opt_v1 = optimizers.Adamax(lr=0.01)
     opt_v2 = adamax.Adamax(learning_rate=0.01)
     self._testOptimizersCompatibility(opt_v1, opt_v2)