Example #1
 def testSparseRepeatedIndices(self):
     for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
         with self.test_session():
             repeated_index_update_var = variables.Variable([[1.0], [2.0]],
                                                            dtype=dtype)
             aggregated_update_var = variables.Variable([[1.0], [2.0]],
                                                        dtype=dtype)
             grad_repeated_index = ops.IndexedSlices(
                 constant_op.constant([0.1, 0.1], shape=[2, 1],
                                      dtype=dtype),
                 constant_op.constant([1, 1]), constant_op.constant([2, 1]))
             grad_aggregated = ops.IndexedSlices(
                 constant_op.constant([0.2], shape=[1, 1], dtype=dtype),
                 constant_op.constant([1]), constant_op.constant([2, 1]))
             repeated_update = adamax.AdaMaxOptimizer().apply_gradients([
                 (grad_repeated_index, repeated_index_update_var)
             ])
             aggregated_update = adamax.AdaMaxOptimizer().apply_gradients([
                 (grad_aggregated, aggregated_update_var)
             ])
             variables.global_variables_initializer().run()
             self.assertAllClose(aggregated_update_var.eval(),
                                 repeated_index_update_var.eval())
             for _ in range(3):
                 repeated_update.run()
                 aggregated_update.run()
                 self.assertAllClose(aggregated_update_var.eval(),
                                     repeated_index_update_var.eval())
Example #2
  def testTensorLearningRate(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())

        beta1_power = opt._get_beta_accumulators()

        # Run 3 steps of AdaMax
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          update.run()

          var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())
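The test above, like the other dense tests in this listing, checks the optimizer against a NumPy reference called adamax_update_numpy, which is defined elsewhere in the test file and not shown in these excerpts. As a rough guide only, the sketch below implements the AdaMax update from Kingma & Ba (2015, Algorithm 2) with the paper's default hyperparameters; the helper name, the added epsilon term, and the exact step-indexing convention are assumptions and may differ from the real helper.

import numpy as np

def adamax_update_sketch(param, g_t, t, m, v,
                         alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    # One AdaMax step for 1-indexed step t (a sketch, not the test's helper).
    m_t = beta1 * m + (1 - beta1) * g_t          # biased first-moment estimate
    v_t = np.maximum(beta2 * v, np.abs(g_t))     # exponentially weighted infinity norm
    # Bias-correct the step size and apply the update.
    param_t = param - (alpha / (1 - beta1**t)) * m_t / (v_t + epsilon)
    return param_t, m_t, v_t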
Example #3
    def testBasic(self):
        for i, dtype in enumerate(self.float_types):
            with self.cached_session(), self.test_scope():
                variable_scope.get_variable_scope().set_use_resource(True)
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype)

                var0 = resource_variable_ops.ResourceVariable(var0_np,
                                                              name="var0_%d" %
                                                              i)
                var1 = resource_variable_ops.ResourceVariable(var1_np,
                                                              name="var1_%d" %
                                                              i)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)

                opt = adamax.AdaMaxOptimizer()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                opt_variables = opt.variables()
                beta1_power = opt._get_beta_accumulators()
                self.assertTrue(beta1_power is not None)
                self.assertIn(beta1_power, opt_variables)

                with ops.Graph().as_default():
                    # Shouldn't return non-slot variables from other graphs.
                    self.assertEqual(0, len(opt.variables()))

                variables.global_variables_initializer().run()
                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                beta1_power = opt._get_beta_accumulators()

                # Run 3 steps of AdaMax
                for t in range(1, 4):
                    update.run()

                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta1_power))

                    var0_np, m0, v0 = adamax_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0),
                                                       rtol=1e-2)
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1),
                                                       rtol=1e-2)
                    self.assertEqual("var0_%d/AdaMax:0" % (i, ),
                                     opt.get_slot(var=var0, name="m").name)
Example #4
 def testSlotsUniqueEager(self):
     with context.eager_mode():
         v1 = resource_variable_ops.ResourceVariable(1.)
         v2 = resource_variable_ops.ResourceVariable(1.)
         opt = adamax.AdaMaxOptimizer(1.)
         opt.minimize(lambda: v1 + v2)
         # There should be one non-slot variable (beta1_power) and two unique
         # slot variables (m and v) for each of v1 and v2: five in total.
         self.assertEqual(5, len(set(opt.variables())))
Example #5
    def doTestSparse(self, use_resource=False):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with self.test_session():
                # Initialize variables for numpy implementation.
                zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
                m0, v0, m1, v1 = (zero_slots(), zero_slots(), zero_slots(),
                                  zero_slots())
                var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    var0 = resource_variable_ops.ResourceVariable(var0_np)
                    var1 = resource_variable_ops.ResourceVariable(var1_np)
                else:
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                grads0_np_indices = np.array([0, 1], dtype=np.int32)
                grads0 = ops.IndexedSlices(
                    constant_op.constant(grads0_np),
                    constant_op.constant(grads0_np_indices),
                    constant_op.constant([2]))
                grads1_np_indices = np.array([2, 1], dtype=np.int32)
                grads1 = ops.IndexedSlices(
                    constant_op.constant(grads1_np),
                    constant_op.constant(grads1_np_indices),
                    constant_op.constant([2]))
                opt = adamax.AdaMaxOptimizer()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
                self.assertAllClose([4.0, 5.0, 6.0], var1.eval())

                beta1_power = opt._get_beta_accumulators()

                # Run 3 steps of AdaMax
                for t in range(1, 4):
                    self.assertAllCloseAccordingToType(0.9**t,
                                                       beta1_power.eval())
                    update.run()

                    var0_np, m0, v0 = adamax_sparse_update_numpy(
                        var0_np, grads0_np_indices, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_sparse_update_numpy(
                        var1_np, grads1_np_indices, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np, var0.eval())
                    self.assertAllCloseAccordingToType(var1_np, var1.eval())
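The sparse test relies on a second reference, adamax_sparse_update_numpy, also defined outside these excerpts. Under the same assumptions as the dense sketch above, a plausible version applies the update only to the rows named by the IndexedSlices indices and leaves every other row of the parameter and the slots untouched:

import numpy as np

def adamax_sparse_update_sketch(param, indices, g_t, t, m, v,
                                alpha=0.001, beta1=0.9, beta2=0.999,
                                epsilon=1e-8):
    # Sparse AdaMax step: only rows listed in `indices` change (a sketch).
    # Assumes `indices` contains no duplicates; aggregation of repeated
    # indices is what testSparseRepeatedIndices exercises separately.
    param_t, m_t, v_t = np.copy(param), np.copy(m), np.copy(v)
    m_slice = beta1 * m[indices] + (1 - beta1) * g_t
    v_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
    param_t[indices] = param[indices] - (alpha / (1 - beta1**t)) * (
        m_slice / (v_slice + epsilon))
    m_t[indices] = m_slice
    v_t[indices] = v_slice
    return param_t, m_t, v_t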
Example #6
 def testSparseDevicePlacement(self):
   for index_dtype in [dtypes.int32, dtypes.int64]:
     with self.cached_session(force_gpu=test.is_gpu_available()):
       # If a GPU is available, tests that all optimizer ops can be placed on
       # it (i.e. they have GPU kernels).
       var = variables.Variable([[1.0], [2.0]])
       indices = constant_op.constant([0, 1], dtype=index_dtype)
       gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
       optimizer = adamax.AdaMaxOptimizer(3.0)
       minimize_op = optimizer.minimize(gathered_sum)
       variables.global_variables_initializer().run()
       minimize_op.run()
Example #7
    def testTwoSessions(self):
        optimizer = adamax.AdaMaxOptimizer()
        g = ops.Graph()
        with g.as_default():
            with session.Session():
                var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
                grads0 = constant_op.constant(np.array([0.1, 0.1]))
                optimizer.apply_gradients([(grads0, var0)])

        gg = ops.Graph()
        with gg.as_default():
            with session.Session():
                var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
                grads0 = constant_op.constant(np.array([0.1, 0.1]))

                # If the optimizer saves any state that is not keyed by graph,
                # the following line fails.
                optimizer.apply_gradients([(grads0, var0)])
Example #8
    def testTensorLearningRate(self):
        for dtype in self.float_types:
            with self.cached_session(), self.test_scope():
                variable_scope.get_variable_scope().set_use_resource(True)
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype)

                var0 = resource_variable_ops.ResourceVariable(var0_np)
                var1 = resource_variable_ops.ResourceVariable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)
                opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                beta1_power = opt._get_beta_accumulators()

                # Run 3 steps of AdaMax
                for t in range(1, 4):
                    self.assertAllCloseAccordingToType(
                        0.9**t, self.evaluate(beta1_power))
                    update.run()

                    var0_np, m0, v0 = adamax_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
Example #9
    def doTestBasic(self, use_resource=False):
        for i, dtype in enumerate(
            [dtypes.half, dtypes.float32, dtypes.float64]):
            with self.test_session(graph=ops.Graph()):
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    var0 = resource_variable_ops.ResourceVariable(
                        var0_np, name="var0_%d" % i)
                    var1 = resource_variable_ops.ResourceVariable(
                        var1_np, name="var1_%d" % i)
                else:
                    var0 = variables.Variable(var0_np)
                    var1 = variables.Variable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)

                opt = adamax.AdaMaxOptimizer()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                opt_variables = opt.variables()
                beta1_power = opt._get_beta_accumulators()
                self.assertTrue(beta1_power is not None)
                self.assertIn(beta1_power, opt_variables)

                if not context.executing_eagerly():
                    with ops.Graph().as_default():
                        # Shouldn't return non-slot variables from other graphs.
                        self.assertEqual(0, len(opt.variables()))

                    self.evaluate(variables.global_variables_initializer())
                    # Fetch params to validate initial values
                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                beta1_power = opt._get_beta_accumulators()

                # Run 3 steps of AdaMax
                for t in range(1, 4):
                    if not context.executing_eagerly():
                        self.evaluate(update)
                    elif t > 1:
                        opt.apply_gradients(zip([grads0, grads1],
                                                [var0, var1]))

                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta1_power))

                    var0_np, m0, v0 = adamax_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adamax_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
                    if use_resource:
                        self.assertEqual("var0_%d/AdaMax:0" % (i, ),
                                         opt.get_slot(var=var0, name="m").name)
Example #10
# Assumed imports for this excerpt (TF 1.x); the placeholders x and y, the
# weights/biases dicts, multilayer_perceptron(), learning_rate and
# training_epochs are defined earlier in the script this snippet comes from.
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib.opt.python.training import adamax

plt.title('Optimizer: AdaMax')
plt.xlabel('training_epochs')
plt.ylabel('loss')

# Construct model
pred = multilayer_perceptron(x, weights, biases)

# Define loss and optimizer
with tf.name_scope('cost'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))

# optimizer setting
with tf.name_scope('optimizer'):
    optimizer = adamax.AdaMaxOptimizer(
        learning_rate=learning_rate).minimize(cost)

# Initializing the variables
init = tf.global_variables_initializer()

all_loss = []
all_step = []

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):

        avg_cost = 0.
        epoch_cost = 0.