Code example #1
    def testConstructAdadeltaWithEpsilonValues(self):
        opt = adadelta.Adadelta(epsilon=None)
        config = opt.get_config()
        self.assertEqual(config["epsilon"], 1e-7)

        opt = adadelta.Adadelta(epsilon=1e-8)
        config = opt.get_config()
        self.assertEqual(config["epsilon"], 1e-8)
Code example #2
  def testConstructAdadeltaWithLR(self):
    opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.)
    self.assertEqual(opt.lr, 1.0)
    opt_2 = adadelta.Adadelta(learning_rate=0.1,
                              rho=0.9,
                              epsilon=1.,
                              lr=1.0)
    self.assertEqual(opt_2.lr, 1.0)
    opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.)
    self.assertEqual(opt_3.lr, 0.1)
Code example #3
File: adadelta_test.py  Project: MFChunga/poo
  def testConstructAdadeltaWithLR(self):
    opt = adadelta.Adadelta(lr=1.0, rho=0.9, epsilon=1.)
    opt_2 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1., lr=1.0)
    opt_3 = adadelta.Adadelta(learning_rate=0.1, rho=0.9, epsilon=1.)
    self.assertIsInstance(opt.lr, variables.Variable)
    self.assertIsInstance(opt_2.lr, variables.Variable)
    self.assertIsInstance(opt_3.lr, variables.Variable)

    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
Code example #4
    def testConfig(self):
        def rho():
            return ops.convert_to_tensor(1.0)

        epsilon = ops.convert_to_tensor(1.0)

        opt = adadelta.Adadelta(learning_rate=1.0, rho=rho, epsilon=epsilon)
        config = opt.get_config()
        opt2 = adadelta.Adadelta.from_config(config)
        self.assertEqual(opt._hyper["learning_rate"][1],
                         opt2._hyper["learning_rate"][1])
        self.assertEqual(opt._hyper["rho"][1].__name__,
                         opt2._hyper["rho"][1].__name__)
        self.assertEqual(opt._hyper["epsilon"][1], opt2._hyper["epsilon"][1])
Code example #5
  def testMinimizeSparseResourceVariable(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
        x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
        loss = pred * pred
        sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(loss)
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
Code example #6
  def testMinimizeSparseResourceVariable(self):
    for dtype in _DATA_TYPES:
      var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
      x = constant_op.constant([[4.0], [5.0]], dtype=dtype)

      def loss():
        pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
        return pred * pred

      sgd_op = adadelta.Adadelta(1.0, 1.0, 1.0).minimize(loss, var_list=[var0])
      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
      # Run 1 step of sgd
      self.evaluate(sgd_op)
      # Validate updated params
      self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
Code example #7
gradient_descent_optimizer_v1_fn = combinations.NamedObject(
    "GradientDescentV1",
    lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = combinations.NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = combinations.NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = combinations.NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))

# TODO(shiningsun): consider adding the other v1 optimizers
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

adadelta_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001))
adagrad_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
adamax_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0))
nadam_optimizer_keras_v2_fn = combinations.NamedObject(
    "NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0))
ftrl_optimizer_keras_v2_fn = combinations.NamedObject(
    "FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001))
gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
rmsprop_optimizer_keras_v2_fn = combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))
Code example #8
  def testAdadeltaCompatibility(self):
    opt_v1 = optimizers.Adadelta(lr=0.01)
    opt_v2 = adadelta.Adadelta(learning_rate=0.01)
    self._testOptimizersCompatibility(opt_v1, opt_v2)
Code example #9
    def doTestBasic(self, use_resource=False, use_callable_params=False):
        num_updates = 4  # number of ADADELTA steps to perform
        for dtype in [dtypes.half, dtypes.float32]:
            for grad in [0.2, 0.1, 0.01]:
                for lr in [1.0, 0.5, 0.1]:
                    var0_init = [1.0, 2.0]
                    var1_init = [3.0, 4.0]
                    if use_resource:
                        var0 = resource_variable_ops.ResourceVariable(
                            var0_init, dtype=dtype)
                        var1 = resource_variable_ops.ResourceVariable(
                            var1_init, dtype=dtype)
                    else:
                        var0 = variables.Variable(var0_init, dtype=dtype)
                        var1 = variables.Variable(var1_init, dtype=dtype)

                    grads = constant_op.constant([grad, grad], dtype=dtype)

                    accum = 0.0
                    accum_update = 0.0

                    # ADADELTA gradient optimizer
                    rho = 0.95
                    epsilon = 1e-8
                    if use_callable_params:
                        adadelta_opt = adadelta.Adadelta(
                            learning_rate=lambda: lr,  # pylint: disable=cell-var-from-loop
                            rho=lambda: rho,  # pylint: disable=cell-var-from-loop
                            epsilon=lambda: epsilon)  # pylint: disable=cell-var-from-loop
                    else:
                        adadelta_opt = adadelta.Adadelta(learning_rate=lr,
                                                         rho=rho,
                                                         epsilon=epsilon)
                    if not context.executing_eagerly():
                        adadelta_update = adadelta_opt.apply_gradients(
                            zip([grads, grads], [var0, var1]))
                        self.evaluate(variables.global_variables_initializer())

                        # Assign slots
                        slot = [None] * 2
                        slot_update = [None] * 2
                        slot[0] = adadelta_opt.get_slot(var0, "accum_grad")
                        self.assertEqual(slot[0].get_shape(), var0.get_shape())

                        slot_update[0] = adadelta_opt.get_slot(
                            var0, "accum_var")
                        self.assertEqual(slot_update[0].get_shape(),
                                         var0.get_shape())

                        slot[1] = adadelta_opt.get_slot(var1, "accum_grad")
                        self.assertEqual(slot[1].get_shape(), var1.get_shape())

                        slot_update[1] = adadelta_opt.get_slot(
                            var1, "accum_var")
                        self.assertEqual(slot_update[1].get_shape(),
                                         var1.get_shape())

                    # Fetch params to validate initial values
                    self.assertAllClose(var0_init, self.evaluate(var0))
                    self.assertAllClose(var1_init, self.evaluate(var1))

                    update = [None] * num_updates
                    tot_update = 0
                    for step in range(num_updates):
                        # Run adadelta update for comparison
                        if not context.executing_eagerly():
                            self.evaluate(adadelta_update)
                        else:
                            adadelta_opt.apply_gradients(
                                zip([grads, grads], [var0, var1]))

                        # Perform initial update without previous accum values
                        accum = accum * rho + (grad**2) * (1 - rho)
                        update[step] = (np.sqrt(accum_update + epsilon) *
                                        (1. / np.sqrt(accum + epsilon)) * grad)
                        accum_update = (accum_update * rho +
                                        (update[step]**2) * (1.0 - rho))
                        tot_update += update[step] * lr

                        if not context.executing_eagerly():
                            # Check that the accumulators have been updated
                            # TODO(lxuechen): This is hard to test in eager mode
                            for slot_idx in range(2):
                                self.assertAllCloseAccordingToType(
                                    np.array([accum, accum],
                                             dtype=dtype.as_numpy_dtype()),
                                    self.evaluate(slot[slot_idx]),
                                    rtol=1e-5)

                                self.assertAllCloseAccordingToType(
                                    np.array([accum_update, accum_update],
                                             dtype=dtype.as_numpy_dtype()),
                                    self.evaluate(slot_update[slot_idx]),
                                    rtol=1e-5)

                            # Check that the parameters have been updated
                            self.assertAllCloseAccordingToType(
                                np.array([
                                    var0_init[0] - tot_update,
                                    var0_init[1] - tot_update
                                ],
                                         dtype=dtype.as_numpy_dtype()),
                                self.evaluate(var0),
                                rtol=1e-5)

                            self.assertAllCloseAccordingToType(
                                np.array([
                                    var1_init[0] - tot_update,
                                    var1_init[1] - tot_update
                                ],
                                         dtype=dtype.as_numpy_dtype()),
                                self.evaluate(var1),
                                rtol=1e-5)
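For reference, the hand-computed values that doTestBasic checks above follow the standard Adadelta recurrence. Below is a minimal standalone sketch of that update in plain NumPy; the function name and the example configuration are illustrative only and are not part of the TensorFlow test suite.

import numpy as np

def adadelta_step(var, grad, accum, accum_update, lr=1.0, rho=0.95, epsilon=1e-8):
    # Decayed average of squared gradients.
    accum = rho * accum + (1.0 - rho) * grad**2
    # Scale the gradient by RMS(previous updates) / RMS(gradients).
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    # Decayed average of squared updates.
    accum_update = rho * accum_update + (1.0 - rho) * update**2
    return var - lr * update, accum, accum_update

# Four steps on [1.0, 2.0] with a constant gradient of 0.1, mirroring one
# (grad, lr) configuration from the test's nested loops.
var, accum, accum_update = np.array([1.0, 2.0]), 0.0, 0.0
for _ in range(4):
    var, accum, accum_update = adadelta_step(var, 0.1, accum, accum_update, lr=1.0)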
Code example #10
File: adadelta_test.py  Project: chrisvon62/AiBot
    def testConstructAdadeltaWithEpsilonValues(self):
        opt = adadelta.Adadelta(epsilon=None)
        self.assertEqual(opt.epsilon, 1e-7)

        opt = adadelta.Adadelta(epsilon=1e-8)
        self.assertEqual(opt.epsilon, 1e-8)
Code example #11
  def doTestBasic(self, use_resource=False):
    num_updates = 4  # number of ADADELTA steps to perform
    for dtype in [dtypes.half, dtypes.float32]:
      for grad in [0.2, 0.1, 0.01]:
        for lr in [1.0, 0.5, 0.1]:
          with self.cached_session():
            var0_init = [1.0, 2.0]
            var1_init = [3.0, 4.0]
            if use_resource:
              var0 = resource_variable_ops.ResourceVariable(
                  var0_init, dtype=dtype)
              var1 = resource_variable_ops.ResourceVariable(
                  var1_init, dtype=dtype)
            else:
              var0 = variables.Variable(var0_init, dtype=dtype)
              var1 = variables.Variable(var1_init, dtype=dtype)

            grads = constant_op.constant([grad, grad], dtype=dtype)

            accum = 0.0
            accum_update = 0.0

            # ADADELTA gradient optimizer
            rho = 0.95
            epsilon = 1e-8
            adadelta_opt = adadelta.Adadelta(lr, rho, epsilon)
            adadelta_update = adadelta_opt.apply_gradients(
                zip([grads, grads], [var0, var1]))

            opt_vars = adadelta_opt.variables()
            self.assertStartsWith(opt_vars[0].name, var0._shared_name)
            self.assertStartsWith(opt_vars[1].name, var0._shared_name)
            self.assertStartsWith(opt_vars[2].name, var1._shared_name)
            self.assertStartsWith(opt_vars[3].name, var1._shared_name)
            self.assertEqual(4, len(opt_vars))

            variables.global_variables_initializer().run()

            # Assign slots
            slot = [None] * 2
            slot_update = [None] * 2
            slot[0] = adadelta_opt.get_slot(var0, "accum_grad")
            self.assertEqual(slot[0].shape, var0.shape)

            slot_update[0] = adadelta_opt.get_slot(var0, "accum_var")
            self.assertEqual(slot_update[0].shape, var0.shape)

            slot[1] = adadelta_opt.get_slot(var1, "accum_grad")
            self.assertEqual(slot[1].shape, var1.shape)

            slot_update[1] = adadelta_opt.get_slot(var1, "accum_var")
            self.assertEqual(slot_update[1].shape, var1.shape)

            # Fetch params to validate initial values
            self.assertAllClose(var0_init, self.evaluate(var0))
            self.assertAllClose(var1_init, self.evaluate(var1))

            update = [None] * num_updates
            tot_update = 0
            for step in range(num_updates):
              # Run adadelta update for comparison
              adadelta_update.run()

              # Perform initial update without previous accum values
              accum = accum * rho + (grad**2) * (1 - rho)
              update[step] = (np.sqrt(accum_update + epsilon) *
                              (1. / np.sqrt(accum + epsilon)) * grad)
              accum_update = (accum_update * rho +
                              (update[step]**2) * (1.0 - rho))
              tot_update += update[step] * lr

              # Check that the accumulators have been updated
              for slot_idx in range(2):
                self.assertAllCloseAccordingToType(
                    np.array([accum, accum], dtype=dtype.as_numpy_dtype()),
                    slot[slot_idx].eval(),
                    rtol=1e-5)

                self.assertAllCloseAccordingToType(
                    np.array(
                        [accum_update, accum_update],
                        dtype=dtype.as_numpy_dtype()),
                    slot_update[slot_idx].eval(),
                    rtol=1e-5)

              # Check that the parameters have been updated
              self.assertAllCloseAccordingToType(
                  np.array(
                      [var0_init[0] - tot_update, var0_init[1] - tot_update],
                      dtype=dtype.as_numpy_dtype()),
                  var0.eval(),
                  rtol=1e-5)

              self.assertAllCloseAccordingToType(
                  np.array(
                      [var1_init[0] - tot_update, var1_init[1] - tot_update],
                      dtype=dtype.as_numpy_dtype()),
                  var1.eval(),
                  rtol=1e-5)
Code example #12
    def doTestBasic(self, use_resource=False):
        num_updates = 4  # number of ADADELTA steps to perform
        for dtype in [dtypes.half, dtypes.float32]:
            for grad in [0.2, 0.1, 0.01]:
                for lr in [1.0, 0.5, 0.1]:
                    with self.cached_session():
                        var0_init = [1.0, 2.0]
                        var1_init = [3.0, 4.0]
                        if use_resource:
                            var0 = resource_variable_ops.ResourceVariable(
                                var0_init, dtype=dtype)
                            var1 = resource_variable_ops.ResourceVariable(
                                var1_init, dtype=dtype)
                        else:
                            var0 = variables.Variable(var0_init, dtype=dtype)
                            var1 = variables.Variable(var1_init, dtype=dtype)

                        grads = constant_op.constant([grad, grad], dtype=dtype)

                        accum = 0.0
                        accum_update = 0.0

                        # ADADELTA gradient optimizer
                        rho = 0.95
                        epsilon = 1e-8
                        adadelta_opt = adadelta.Adadelta(lr, rho, epsilon)
                        adadelta_update = adadelta_opt.apply_gradients(
                            zip([grads, grads], [var0, var1]))

                        opt_vars = adadelta_opt.variables()
                        self.assertStartsWith(opt_vars[0].name,
                                              var0._shared_name)
                        self.assertStartsWith(opt_vars[1].name,
                                              var0._shared_name)
                        self.assertStartsWith(opt_vars[2].name,
                                              var1._shared_name)
                        self.assertStartsWith(opt_vars[3].name,
                                              var1._shared_name)
                        self.assertEqual(4, len(opt_vars))

                        variables.global_variables_initializer().run()

                        # Assign slots
                        slot = [None] * 2
                        slot_update = [None] * 2
                        self.assertEqual(["accum", "accum_update"],
                                         adadelta_opt.get_slot_names())
                        slot[0] = adadelta_opt.get_slot(var0, "accum")
                        self.assertEqual(slot[0].get_shape(),
                                         var0.get_shape())
                        self.assertFalse(
                            slot[0] in variables.trainable_variables())

                        slot_update[0] = adadelta_opt.get_slot(
                            var0, "accum_update")
                        self.assertEqual(slot_update[0].get_shape(),
                                         var0.get_shape())
                        self.assertFalse(
                            slot_update[0] in variables.trainable_variables())

                        slot[1] = adadelta_opt.get_slot(var1, "accum")
                        self.assertEqual(slot[1].get_shape(),
                                         var1.get_shape())
                        self.assertFalse(
                            slot[1] in variables.trainable_variables())

                        slot_update[1] = adadelta_opt.get_slot(
                            var1, "accum_update")
                        self.assertEqual(slot_update[1].get_shape(),
                                         var1.get_shape())
                        self.assertFalse(
                            slot_update[1] in variables.trainable_variables())

                        # Fetch params to validate initial values
                        self.assertAllClose(var0_init, var0.eval())
                        self.assertAllClose(var1_init, var1.eval())

                        update = [None] * num_updates
                        tot_update = 0
                        for step in range(num_updates):
                            # Run adadelta update for comparison
                            adadelta_update.run()

                            # Perform initial update without previous accum values
                            accum = accum * rho + (grad**2) * (1 - rho)
                            update[step] = (np.sqrt(accum_update + epsilon) *
                                            (1. / np.sqrt(accum + epsilon)) *
                                            grad)
                            accum_update = (accum_update * rho +
                                            (update[step]**2) * (1.0 - rho))
                            tot_update += update[step] * lr

                            # Check that the accumulators have been updated
                            for slot_idx in range(2):
                                self.assertAllCloseAccordingToType(
                                    np.array([accum, accum],
                                             dtype=dtype.as_numpy_dtype()),
                                    slot[slot_idx].eval(),
                                    rtol=1e-5)

                                self.assertAllCloseAccordingToType(
                                    np.array([accum_update, accum_update],
                                             dtype=dtype.as_numpy_dtype()),
                                    slot_update[slot_idx].eval(),
                                    rtol=1e-5)

                            # Check that the parameters have been updated
                            self.assertAllCloseAccordingToType(
                                np.array([
                                    var0_init[0] - tot_update,
                                    var0_init[1] - tot_update
                                ],
                                         dtype=dtype.as_numpy_dtype()),
                                var0.eval(),
                                rtol=1e-5)

                            self.assertAllCloseAccordingToType(
                                np.array([
                                    var1_init[0] - tot_update,
                                    var1_init[1] - tot_update
                                ],
                                         dtype=dtype.as_numpy_dtype()),
                                var1.eval(),
                                rtol=1e-5)