def testSparseRepeatedIndices(self):
    for dtype in [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]:
        with self.cached_session():
            # A gradient with repeated indices should produce the same update
            # as an equivalent pre-aggregated gradient.
            repeated_index_update_var = tf.Variable([[1.0], [2.0]],
                                                    dtype=dtype)
            aggregated_update_var = tf.Variable([[1.0], [2.0]], dtype=dtype)
            grad_repeated_index = tf.IndexedSlices(
                tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
                tf.constant([1, 1]),
                tf.constant([2, 1]))
            grad_aggregated = tf.IndexedSlices(
                tf.constant([0.2], shape=[1, 1], dtype=dtype),
                tf.constant([1]),
                tf.constant([2, 1]))
            repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
            repeated_update = repeated_update_opt.apply_gradients(
                [(grad_repeated_index, repeated_index_update_var)])
            aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
            aggregated_update = aggregated_update_opt.apply_gradients(
                [(grad_aggregated, aggregated_update_var)])
            self.evaluate(variables.global_variables_initializer())
            self.assertAllClose(aggregated_update_var.eval(),
                                repeated_index_update_var.eval())
            for _ in range(3):
                repeated_update.run()
                aggregated_update.run()
                self.assertAllClose(aggregated_update_var.eval(),
                                    repeated_index_update_var.eval())
def testSlotsUniqueEager(self):
    with context.eager_mode():
        v1 = tf.Variable(1.)
        v2 = tf.Variable(1.)
        opt = lazy_adam_optimizer.LazyAdamOptimizer(1.)
        opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
        # There should be the iteration counter, plus two unique slot
        # variables (m and v) for each of v1 and v2.
        self.assertEqual(5, len(set(opt.variables())))
        self.assertEqual(self.evaluate(opt.variables()[0]),
                         self.evaluate(opt.iterations))
def testSparseDevicePlacement(self):
    for index_dtype in [tf.dtypes.int32, tf.dtypes.int64]:
        with self.cached_session(force_gpu=tf.test.is_gpu_available()):
            # If a GPU is available, tests that all optimizer ops can be
            # placed on it (i.e. they have GPU kernels).
            var = tf.Variable([[1.0], [2.0]])
            indices = tf.constant([0, 1], dtype=index_dtype)
            g_sum = lambda: tf.math.reduce_sum(tf.gather(var, indices))  # pylint: disable=cell-var-from-loop
            optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
            minimize_op = optimizer.minimize(g_sum, var_list=[var])
            self.evaluate(variables.global_variables_initializer())
            self.evaluate(minimize_op)
def testSparse(self):
    for dtype in [tf.dtypes.half, tf.dtypes.float32, tf.dtypes.float64]:
        with self.cached_session():
            # Initialize variables for numpy implementation.
            m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
            var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
            grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
            var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
            grads1_np = np.array([0.01, 0.0, 0.01],
                                 dtype=dtype.as_numpy_dtype)

            var0 = tf.Variable(var0_np)
            var1 = tf.Variable(var1_np)
            grads0_np_indices = np.array([0, 2], dtype=np.int32)
            grads0 = tf.IndexedSlices(
                tf.constant(grads0_np[grads0_np_indices]),
                tf.constant(grads0_np_indices),
                tf.constant([3]))
            grads1_np_indices = np.array([0, 2], dtype=np.int32)
            grads1 = tf.IndexedSlices(
                tf.constant(grads1_np[grads1_np_indices]),
                tf.constant(grads1_np_indices),
                tf.constant([3]))
            opt = lazy_adam_optimizer.LazyAdamOptimizer()
            update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
            self.evaluate(variables.global_variables_initializer())

            # Fetch params to validate initial values
            self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
            self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))

            beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)

            # Run 3 steps of Adam
            for t in range(3):
                self.assertAllCloseAccordingToType(
                    0.9**(t + 1), self.evaluate(beta_1_power))
                self.assertAllCloseAccordingToType(
                    0.999**(t + 1), self.evaluate(beta_2_power))
                self.evaluate(update)

                var0_np, m0, v0 = adam_update_numpy(
                    var0_np, grads0_np, t, m0, v0)
                var1_np, m1, v1 = adam_update_numpy(
                    var1_np, grads1_np, t, m1, v1)

                # Validate updated params
                self.assertAllCloseAccordingToType(var0_np,
                                                   self.evaluate(var0))
                self.assertAllCloseAccordingToType(var1_np,
                                                   self.evaluate(var1))
def doTestBasic(self, use_callable_params=False):
    # yapf: disable
    for i, dtype in enumerate([tf.dtypes.half,
                               tf.dtypes.float32,
                               tf.dtypes.float64]):
        # yapf: enable
        with self.session(graph=tf.Graph()):
            # Initialize variables for numpy implementation.
            m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
            var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
            grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
            var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
            grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

            var0 = tf.Variable(var0_np, name="var0_%d" % i)
            var1 = tf.Variable(var1_np, name="var1_%d" % i)
            grads0 = tf.constant(grads0_np)
            grads1 = tf.constant(grads1_np)

            learning_rate = lambda: 0.001
            beta1 = lambda: 0.9
            beta2 = lambda: 0.999
            epsilon = lambda: 1e-8
            if not use_callable_params:
                learning_rate = learning_rate()
                beta1 = beta1()
                beta2 = beta2()
                epsilon = epsilon()

            opt = lazy_adam_optimizer.LazyAdamOptimizer(
                learning_rate=learning_rate)
            if not context.executing_eagerly():
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                self.evaluate(variables.global_variables_initializer())
                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 4.0], self.evaluate(var1))

            # Run 3 steps of Adam
            for t in range(3):
                beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
                self.assertAllCloseAccordingToType(
                    0.9**(t + 1), self.evaluate(beta_1_power))
                self.assertAllCloseAccordingToType(
                    0.999**(t + 1), self.evaluate(beta_2_power))
                if not context.executing_eagerly():
                    self.evaluate(update)
                else:
                    opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

                var0_np, m0, v0 = adam_update_numpy(
                    var0_np, grads0_np, t, m0, v0)
                var1_np, m1, v1 = adam_update_numpy(
                    var1_np, grads1_np, t, m1, v1)

                # Validate updated params
                self.assertAllCloseAccordingToType(var0_np,
                                                   self.evaluate(var0))
                self.assertAllCloseAccordingToType(var1_np,
                                                   self.evaluate(var1))
                self.assertEqual("var0_%d/m:0" % (i,),
                                 opt.get_slot(var0, "m").name)