Example #1
 def testSparseRepeatedIndices(self):
   # TODO(tanzheny, omalleyt): Fix test in eager mode.
   for dtype in [tf.half, tf.float32, tf.float64]:
     with tf.Graph().as_default(), self.cached_session():
       repeated_index_update_var = tf.Variable(
           [[1.0], [2.0]], dtype=dtype)
       aggregated_update_var = tf.Variable(
           [[1.0], [2.0]], dtype=dtype)
       grad_repeated_index = tf.IndexedSlices(
           tf.constant(
               [0.1, 0.1], shape=[2, 1], dtype=dtype),
           tf.constant([1, 1]),
           tf.constant([2, 1]))
       grad_aggregated = tf.IndexedSlices(
           tf.constant(
               [0.2], shape=[1, 1], dtype=dtype),
           tf.constant([1]),
           tf.constant([2, 1]))
       repeated_update = adam.NonFusedAdam().apply_gradients(
           [(grad_repeated_index, repeated_index_update_var)])
       aggregated_update = adam.NonFusedAdam().apply_gradients(
           [(grad_aggregated, aggregated_update_var)])
       self.evaluate(tf.compat.v1.global_variables_initializer())
       self.assertAllClose(aggregated_update_var,
                           self.evaluate(repeated_index_update_var))
       for _ in range(3):
         repeated_update.run()
         aggregated_update.run()
         self.assertAllClose(aggregated_update_var,
                             self.evaluate(repeated_index_update_var))
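
The test above relies on TensorFlow summing gradient slices that share an index, so an IndexedSlices gradient of [0.1, 0.1] at index 1 must behave exactly like the single aggregated slice of 0.2. A minimal sketch of that aggregation, using tf.math.unsorted_segment_sum (not part of the test itself):

import tensorflow as tf

values = tf.constant([[0.1], [0.1]])   # two gradient slices for the same row
indices = tf.constant([1, 1])          # both point at row 1
# Summing slices per unique index reproduces the aggregated gradient [0.2].
aggregated = tf.math.unsorted_segment_sum(values, indices, num_segments=2)
print(aggregated.numpy())              # [[0. ], [0.2]]
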
Example #2
  def testSparseWithAmsgrad(self):
    # dtypes.half does not work on gpu + eager.
    for dtype in [tf.float32, tf.float64]:
      with self.cached_session():
        m0 = np.array([[0.0], [0.0]])
        v0 = np.array([[0.0], [0.0]])
        v0hat = np.array([[0.0], [0.0]])
        indices_np = np.array([1])
        indices = tf.constant(indices_np, dtype=tf.int32)
        var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
        repeated_index_update_var = tf.Variable(var0_np, dtype=dtype)
        aggregated_update_var = tf.Variable(var0_np, dtype=dtype)
        grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
        grad_repeated_index = tf.IndexedSlices(
            tf.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
            tf.constant([1, 1]), tf.constant([2, 1]))
        grad_aggregated = tf.IndexedSlices(grads0_np, indices,
                                            tf.constant([2, 1]))
        opt_repeated = adam.NonFusedAdam(amsgrad=True)
        opt_aggregated = adam.NonFusedAdam(amsgrad=True)
        if not tf.executing_eagerly():
          repeated_update = opt_repeated.apply_gradients(
              [(grad_repeated_index, repeated_index_update_var)])
          aggregated_update = opt_aggregated.apply_gradients(
              [(grad_aggregated, aggregated_update_var)])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertAllClose(
            self.evaluate(aggregated_update_var),
            self.evaluate(repeated_index_update_var))
        for t in range(3):
          if not tf.executing_eagerly():
            self.evaluate(repeated_update)
            self.evaluate(aggregated_update)
          else:
            opt_repeated.apply_gradients(
                [(grad_repeated_index, repeated_index_update_var)])
            opt_aggregated.apply_gradients(
                [(grad_aggregated, aggregated_update_var)])

          var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
              var0_np, indices_np, grads0_np, t, m0, v0, v0hat)

          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(aggregated_update_var))
          self.assertAllCloseAccordingToType(
              self.evaluate(aggregated_update_var),
              self.evaluate(repeated_index_update_var))
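
The helper adam_sparse_update_numpy_amsgrad is not reproduced on this page. A plausible reconstruction, assuming the standard AMSGrad update applied only to the touched rows (the defaults such as lr=0.001 and epsilon=1e-7 are assumptions):

import numpy as np

def adam_sparse_update_numpy_amsgrad(param, indices, g_t, t, m, v, vhat,
                                     lr=0.001, beta1=0.9, beta2=0.999,
                                     epsilon=1e-7):
  # Bias-corrected step size for step t (0-based here, hence t + 1).
  lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
  m_t, v_t, vhat_t, param_t = map(np.copy, (m, v, vhat, param))
  # Only the rows named in `indices` receive an update.
  m_t[indices] = beta1 * m[indices] + (1 - beta1) * g_t
  v_t[indices] = beta2 * v[indices] + (1 - beta2) * g_t * g_t
  vhat_t[indices] = np.maximum(vhat[indices], v_t[indices])
  param_t[indices] = param[indices] - lr_t * m_t[indices] / (
      np.sqrt(vhat_t[indices]) + epsilon)
  return param_t, m_t, v_t, vhat_t
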
Example #3
  def doTestBasic(self, use_callable_params=False):
    for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np, name="var0_%d" % i)
        var1 = tf.Variable(var1_np, name="var1_%d" % i)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)

        learning_rate = lambda: 0.001
        beta1 = lambda: 0.9
        beta2 = lambda: 0.999
        epsilon = lambda: 1e-8
        if not use_callable_params:
          learning_rate = learning_rate()
          beta1 = beta1()
          beta2 = beta2()
          epsilon = epsilon()

        opt = adam.NonFusedAdam(learning_rate=learning_rate)
        if not tf.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Run 3 steps of NonFusedAdam
        for t in range(3):
          beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          if not tf.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
          self.assertAllCloseAccordingToType(
              var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
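
The dense reference adam_update_numpy is likewise defined elsewhere in the test file. A minimal sketch of what it presumably computes, i.e. one plain Adam step with bias correction (the default hyperparameters shown are assumptions):

import numpy as np

def adam_update_numpy(param, g_t, t, m, v,
                      lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
  # Bias-corrected learning rate for step t (0-based, hence t + 1).
  lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
  m_t = beta1 * m + (1 - beta1) * g_t          # first-moment estimate
  v_t = beta2 * v + (1 - beta2) * g_t * g_t    # second-moment estimate
  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
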
Example #4
 def testSparseDevicePlacement(self):
   # TODO(tanzheny, omalleyt): Fix test in eager mode.
   for index_dtype in [tf.int32, tf.int64]:
     with tf.Graph().as_default(), self.cached_session(
         force_gpu=tf.test.is_gpu_available()):
       # If a GPU is available, tests that all optimizer ops can be placed on
       # it (i.e. they have GPU kernels).
       var = tf.Variable([[1.0], [2.0]])
       indices = tf.constant([0, 1], dtype=index_dtype)
       g_sum = lambda: tf.reduce_sum(tf.gather(var, indices))  # pylint: disable=cell-var-from-loop
       optimizer = adam.NonFusedAdam(3.0)
       minimize_op = optimizer.minimize(g_sum, var_list=[var])
       self.evaluate(tf.compat.v1.global_variables_initializer())
       minimize_op.run()
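
A quick way to inspect the placement this test enforces, should it ever fail, is TensorFlow's built-in device-placement logging (not used by the test itself):

import tensorflow as tf

# Prints the device assigned to every op, making it easy to spot an
# optimizer op that silently fell back to the CPU.
tf.debugging.set_log_device_placement(True)
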
Example #5
  def testBasicWithLearningRateInverseTimeDecay(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
      with tf.Graph().as_default(), self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np, name="var0_%d" % i)
        var1 = tf.Variable(var1_np, name="var1_%d" % i)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)

        learning_rate = 0.001
        decay = 0.5
        lr_schedule = learning_rate_schedule.InverseTimeDecay(
            learning_rate, decay_steps=1.0, decay_rate=decay)
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 1e-7

        opt = adam.NonFusedAdam(
            learning_rate=lr_schedule,
            beta_1=beta_1,
            beta_2=beta_2,
            epsilon=epsilon)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Run 3 steps of NonFusedAdam
        for t in range(3):
          self.evaluate(update)

          lr_np = learning_rate / (1 + decay * t)

          var0_np, m0, v0 = adam_update_numpy(
              var0_np, grads0_np, t, m0, v0, lr=lr_np)
          var1_np, m1, v1 = adam_update_numpy(
              var1_np, grads1_np, t, m1, v1, lr=lr_np)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
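
The expected learning rate in the loop above, lr_np = learning_rate / (1 + decay * t), is exactly what InverseTimeDecay yields when decay_steps=1.0. A small stand-alone check using the public tf.keras schedule (an assumption; the test imports it as learning_rate_schedule):

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
    initial_learning_rate=0.001, decay_steps=1.0, decay_rate=0.5)
for step in range(3):
  expected = 0.001 / (1 + 0.5 * step)
  assert abs(float(schedule(step)) - expected) < 1e-8
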
Example #6
  def testSparse(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for dtype in [tf.half, tf.float32, tf.float64]:
      with tf.Graph().as_default(), self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0_np_indices = np.array([0, 2], dtype=np.int32)
        grads0 = tf.IndexedSlices(
            tf.constant(grads0_np[grads0_np_indices]),
            tf.constant(grads0_np_indices), tf.constant([3]))
        grads1_np_indices = np.array([0, 2], dtype=np.int32)
        grads1 = tf.IndexedSlices(
            tf.constant(grads1_np[grads1_np_indices]),
            tf.constant(grads1_np_indices), tf.constant([3]))
        opt = adam.NonFusedAdam()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))

        beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
        # Run 3 steps of NonFusedAdam
        for t in range(3):
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          update.run()

          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)

          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
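
get_beta_accumulators, used here and in the other basic tests, is another helper from the test file that is not shown on this page. A sketch of the intent, assuming it derives the running beta powers from the optimizer's iteration counter and hyperparameters (the use of the private _get_hyper accessor is an assumption):

import tensorflow as tf

def get_beta_accumulators(opt, dtype):
  local_step = tf.cast(opt.iterations + 1, dtype)
  beta_1_power = tf.pow(tf.cast(opt._get_hyper("beta_1"), dtype), local_step)
  beta_2_power = tf.pow(tf.cast(opt._get_hyper("beta_2"), dtype), local_step)
  return beta_1_power, beta_2_power
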
Example #7
  def testBasicWithAmsgrad(self):
    for i, dtype in enumerate([tf.half, tf.float32, tf.float64]):
      with self.cached_session():
        # Initialize variables for numpy implementation.
        m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np, name="var0_%d" % i)
        var1 = tf.Variable(var1_np, name="var1_%d" % i)
        grads0 = tf.constant(grads0_np)
        grads1 = tf.constant(grads1_np)

        opt = adam.NonFusedAdam(amsgrad=True)
        if not tf.executing_eagerly():
          update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Run 3 steps of NonFusedAdam
        for t in range(3):
          beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
          self.assertAllCloseAccordingToType(0.9**(t + 1),
                                             self.evaluate(beta_1_power))
          self.assertAllCloseAccordingToType(0.999**(t + 1),
                                             self.evaluate(beta_2_power))
          if not tf.executing_eagerly():
            self.evaluate(update)
          else:
            opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

          var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
              var0_np, grads0_np, t, m0, v0, v0hat)
          var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
              var1_np, grads1_np, t, m1, v1, v1hat)

          # Validate updated params
          self.assertAllCloseAccordingToType(
              var0_np, self.evaluate(var0), rtol=1e-4, atol=1e-4)
          self.assertAllCloseAccordingToType(
              var1_np, self.evaluate(var1), rtol=1e-4, atol=1e-4)
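
# The AMSGrad helper adam_update_numpy_amsgrad used above is, again, not
# shown on this page. A plausible dense reconstruction (defaults assumed):
import numpy as np

def adam_update_numpy_amsgrad(param, g_t, t, m, v, vhat,
                              lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7):
  lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  vhat_t = np.maximum(vhat, v_t)   # AMSGrad keeps the running max of v
  param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
  return param_t, m_t, v_t, vhat_t
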
class KerasOptimizerBenchmark(
    tf.test.Benchmark, metaclass=ParameterizedBenchmark
):
    """Keras optimizer benchmarks."""

    # The parameter of each benchmark test is a tuple, and the first one is
    # the optimizer name.
    _benchmark_parameters = benchmark_util.generate_benchmark_params_cpu_gpu(
        [
            ("Adam", tf.keras.optimizers.Adam(), 10),
            ("NonFusedAdam", adam.NonFusedAdam(), 10),
        ]
    )

    def benchmark_optimizer(self, optimizer, num_iters):
        """Optimizer benchmark with Bidirectional LSTM model on IMDB data.

        Args:
          optimizer: The optimizer instance to be benchmarked.
          num_iters: The number of iterations to run for performance
            measurement.
        """
        model, train_x, train_y = bidirect_imdb_lstm_config()
        metrics, wall_time, extras = benchmark_util.measure_performance(
            model,
            x=train_x,
            y=train_y,
            batch_size=512,
            optimizer=optimizer,
            loss="binary_crossentropy",
            metrics=["accuracy"],
        )
        name = benchmark_util.get_benchmark_name(self._get_name())
        metadata = {
            "implementation": name[0],
            "model_name": "optimizers",
            "parameters": "lstm.512",
        }
        extras.update(metadata)
        self.report_benchmark(
            iters=num_iters, wall_time=wall_time, metrics=metrics, extras=extras
        )