Example #1
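This test exercises the dense-variable path: Adam8POptimizer minimizes a simple quadratic loss over a 10-element weight vector, and the assertions check that 100 training steps drive the loss well below its initial value.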
 def test_adam8p_dense(self):
     opt = adam8p.Adam8POptimizer(learning_rate=1e-2)
     v = tf.get_variable(name='weights',
                         shape=[10],
                         dtype=tf.float32,
                         initializer=tf.initializers.random_normal())
     loss = tf.reduce_mean(v**2)
     global_step = tf.train.get_or_create_global_step()
     train_op = opt.minimize(loss, var_list=[v], global_step=global_step)
     with self.cached_session() as sess:
         sess.run(tf.initializers.global_variables())
         initial_loss = sess.run(loss)
         for _ in range(100):
             sess.run(train_op)
         final_loss = sess.run(loss)
         self.assertLess(final_loss, 0.20804106)
         self.assertGreater(initial_loss, 0.50804106)
Example #2
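This test exercises the sparse path: the loss only touches rows of a 20x20 variable selected via tf.nn.embedding_lookup, so the gradient arrives as IndexedSlices and the optimizer's sparse update is used.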
 def test_adam8p_sparse(self):
     opt = adam8p.Adam8POptimizer(learning_rate=1e-2)
     dense_var = tf.get_variable(
         name='weights',
         shape=[20, 20],
         dtype=tf.float32,
         initializer=tf.initializers.random_normal())
     loss = tf.reduce_mean(
         tf.nn.embedding_lookup(dense_var, np.arange(5, dtype=np.int32))**2)
     global_step = tf.train.get_or_create_global_step()
     train_op = opt.minimize(loss,
                             var_list=[dense_var],
                             global_step=global_step)
     with self.cached_session() as sess:
         sess.run(tf.initializers.global_variables())
         initial_loss = sess.run(loss)
         for _ in range(100):
             sess.run(train_op)
         final_loss = sess.run(loss)
         print(final_loss, initial_loss)
         self.assertLess(final_loss, 0.4)
         self.assertGreater(initial_loss, 1.0)
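Both tests assume the usual TF1 test scaffolding from the surrounding file. A minimal sketch of that scaffolding, assuming TensorFlow 1.x semantics via tf.compat.v1 and a hypothetical task_set.optimizers import path for the adam8p module:

 import numpy as np
 import tensorflow.compat.v1 as tf

 from task_set.optimizers import adam8p  # assumed module location

 tf.disable_v2_behavior()  # the tests rely on graph-mode constructs


 class Adam8PTest(tf.test.TestCase):
     # test_adam8p_dense and test_adam8p_sparse from the examples above
     # would be defined here.
     pass


 if __name__ == '__main__':
     tf.test.main()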