Example 1
    def __init__(self, optimizer_name, lr, hparams, use_tpu=False):  # pylint: disable=super-init-not-called
        tf.logging.info("Using optimizer %s", optimizer_name)

        mlperf_log.transformer_print(key=mlperf_log.OPT_NAME,
                                     value=optimizer_name)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA1,
                                     value=hparams.optimizer_adam_beta1)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA2,
                                     value=hparams.optimizer_adam_beta2)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_EPSILON,
                                     value=hparams.optimizer_adam_epsilon)

        if optimizer_name == "Adam":
            # We change the default epsilon for Adam.
            # Using LazyAdam as it's much faster for large vocabulary embeddings.
            self._opt = tf.contrib.opt.LazyAdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon)
        elif optimizer_name == "MultistepAdam":
            self._opt = multistep_optimizer.MultistepAdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon,
                n=hparams.optimizer_multistep_accumulate_steps)
        elif optimizer_name == "Momentum":
            self._opt = tf.train.MomentumOptimizer(
                lr,
                momentum=hparams.optimizer_momentum_momentum,
                use_nesterov=hparams.optimizer_momentum_nesterov)
        elif optimizer_name == "YellowFin":
            self._opt = yellowfin.YellowFinOptimizer(
                learning_rate=lr, momentum=hparams.optimizer_momentum_momentum)
        elif optimizer_name == "TrueAdam":
            self._opt = tf.train.AdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon)
        elif optimizer_name == "AdamW":
            # OpenAI GPT used weight decay.
            # Given the internals of AdamW, a weight decay proportional to the
            # learning rate is chosen to match the OpenAI implementation.
            # The weight decay update to each parameter is applied before the
            # Adam gradient computation, which differs from what is described
            # in the paper and in the OpenAI implementation:
            # https://arxiv.org/pdf/1711.05101.pdf
            self._opt = tf.contrib.opt.AdamWOptimizer(
                0.01 * lr,
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon)
        elif optimizer_name == "Adafactor":
            self._opt = adafactor.adafactor_optimizer_from_hparams(hparams, lr)
        else:
            self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](
                lr)
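
The "AdamW" branch above passes 0.01 * lr as the weight-decay argument, i.e. the decay is deliberately tied to the learning rate to mimic the OpenAI GPT setup. As a point of reference, here is a minimal NumPy sketch of the decoupled weight-decay update from the linked paper (the hyperparameter values and single-parameter setup are illustrative; per the comment above, the tf.contrib implementation applies the decay at a slightly different point):

import numpy as np

def adamw_step(theta, g, m, v, t, lr, wd, beta1=0.9, beta2=0.999, eps=1e-8):
    """One decoupled-weight-decay Adam update on a single parameter array."""
    m = beta1 * m + (1.0 - beta1) * g            # first-moment estimate
    v = beta2 * v + (1.0 - beta2) * g * g        # second-moment estimate
    m_hat = m / (1.0 - beta1 ** t)               # bias correction
    v_hat = v / (1.0 - beta2 ** t)
    theta = theta - lr * m_hat / (np.sqrt(v_hat) + eps)  # Adam step
    theta = theta - wd * theta                   # decoupled weight decay
    return theta, m, v

# As in the snippet, the decay is chosen proportional to the learning rate.
lr = 2e-4
wd = 0.01 * lr
theta, m, v = np.ones(3), np.zeros(3), np.zeros(3)
theta, m, v = adamw_step(theta, np.full(3, 0.1), m, v, t=1, lr=lr, wd=wd)
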
Example 2
def multistep_adam(learning_rate, hparams):
    return multistep_optimizer.MultistepAdamOptimizer(
        learning_rate,
        beta1=hparams.optimizer_adam_beta1,
        beta2=hparams.optimizer_adam_beta2,
        epsilon=hparams.optimizer_adam_epsilon,
        n=hparams.optimizer_multistep_accumulate_steps)
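
MultistepAdamOptimizer accumulates gradients over n consecutive calls and only applies one Adam update per block, so a model sees the effect of an n-times larger batch with the memory footprint of a single micro-batch. A minimal usage sketch, assuming a TF 1.x environment with tensor2tensor installed (the placeholders, loss, and n=4 are illustrative):

import tensorflow as tf
from tensor2tensor.utils import multistep_optimizer

x = tf.placeholder(tf.float32, shape=[None, 10])
y = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.zeros([10, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

# Accumulate 4 micro-batch gradients, then take one Adam step on their mean.
opt = multistep_optimizer.MultistepAdamOptimizer(learning_rate=0.01, n=4)
train_op = opt.minimize(loss)  # variables only move on every 4th session run
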
Example 3
    def testResourceVariables(self):
        v1 = tf.Variable([1., 2.], use_resource=True)
        v2 = tf.Variable([3., 4.], use_resource=True)
        with tf.GradientTape() as tape:
            tape.watch([v1, v2])
            loss = tf.reduce_sum(tf.gather(params=v1, indices=[0]) + v2)
        v1_grad, v2_grad = tape.gradient(loss, [v1, v2])
        multistep_opt = multistep_optimizer.MultistepAdamOptimizer(0.1)
        multistep_opt.apply_gradients(((v1_grad, v1), (v2_grad, v2)))
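
In the test above the gradient with respect to v1 flows through tf.gather, so it arrives as a tf.IndexedSlices rather than a dense tensor, exercising the optimizer's sparse-update path alongside the dense one for v2. A quick way to see this, assuming TF 2.x eager execution (the variables and loss are illustrative):

import tensorflow as tf

v1 = tf.Variable([1., 2.])
v2 = tf.Variable([3., 4.])
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.gather(v1, [0]) + v2)
g1, g2 = tape.gradient(loss, [v1, v2])
print(type(g1).__name__)  # IndexedSlices -> sparse update path
print(type(g2).__name__)  # EagerTensor  -> dense update path
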
Example 4
    def __init__(self, optimizer_name, lr, hparams, use_tpu=False):  # pylint: disable=super-init-not-called
        tf.logging.info("Using optimizer %s", optimizer_name)

        mlperf_log.transformer_print(key=mlperf_log.OPT_NAME,
                                     value=optimizer_name)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA1,
                                     value=hparams.optimizer_adam_beta1)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_BETA2,
                                     value=hparams.optimizer_adam_beta2)
        mlperf_log.transformer_print(key=mlperf_log.OPT_HP_ADAM_EPSILON,
                                     value=hparams.optimizer_adam_epsilon)

        if optimizer_name == "Adam":
            # We change the default epsilon for Adam.
            # Using LazyAdam as it's much faster for large vocabulary embeddings.
            self._opt = tf.contrib.opt.LazyAdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon)
        elif optimizer_name == "MultistepAdam":
            self._opt = multistep_optimizer.MultistepAdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon,
                n=hparams.optimizer_multistep_accumulate_steps)
        elif optimizer_name == "Momentum":
            self._opt = tf.train.MomentumOptimizer(
                lr,
                momentum=hparams.optimizer_momentum_momentum,
                use_nesterov=hparams.optimizer_momentum_nesterov)
        elif optimizer_name == "YellowFin":
            self._opt = yellowfin.YellowFinOptimizer(
                learning_rate=lr, momentum=hparams.optimizer_momentum_momentum)
        elif optimizer_name == "TrueAdam":
            self._opt = tf.train.AdamOptimizer(
                lr,
                beta1=hparams.optimizer_adam_beta1,
                beta2=hparams.optimizer_adam_beta2,
                epsilon=hparams.optimizer_adam_epsilon)
        elif optimizer_name == "Adafactor":
            self._opt = adafactor.adafactor_optimizer_from_hparams(hparams, lr)
        else:
            self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](
                lr)
Example 5
  def __init__(self, optimizer_name, lr, hparams, use_tpu=False):  # pylint: disable=super-init-not-called
    if optimizer_name == "Adam" and use_tpu:
      # LazyAdamOptimizer does not work on TPU
      optimizer_name = "TrueAdam"

    tf.logging.info("Using optimizer %s", optimizer_name)

    if optimizer_name == "Adam":
      # We change the default epsilon for Adam and re-scale lr.
      # Using LazyAdam as it's much faster for large vocabulary embeddings.
      self._opt = tf.contrib.opt.LazyAdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon)
    elif optimizer_name == "MultistepAdam":
      self._opt = multistep_optimizer.MultistepAdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon,
          n=hparams.optimizer_multistep_accumulate_steps)
    elif optimizer_name == "Momentum":
      self._opt = tf.train.MomentumOptimizer(
          lr,
          momentum=hparams.optimizer_momentum_momentum,
          use_nesterov=hparams.optimizer_momentum_nesterov)
    elif optimizer_name == "YellowFin":
      self._opt = yellowfin.YellowFinOptimizer(
          learning_rate=lr, momentum=hparams.optimizer_momentum_momentum)
    elif optimizer_name == "TrueAdam":
      self._opt = tf.train.AdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon)
    elif optimizer_name == "Adafactor":
      self._opt = adafactor.adafactor_optimizer_from_hparams(hparams, lr)
    else:
      self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](lr)
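
The constructor above is essentially a registry keyed by optimizer_name, with a TPU-specific fallback from LazyAdam to TrueAdam and tf.contrib.layers.OPTIMIZER_CLS_NAMES as the catch-all. The same dispatch can be written as a dictionary of factories; the sketch below is illustrative (abridged to a few branches) and assumes TF 1.x, tensor2tensor's module layout, and an hparams object carrying the fields used in the snippets:

import tensorflow as tf
from tensor2tensor.utils import multistep_optimizer

def build_optimizer(optimizer_name, lr, hparams, use_tpu=False):
    if optimizer_name == "Adam" and use_tpu:
        optimizer_name = "TrueAdam"  # LazyAdamOptimizer does not work on TPU

    adam_kwargs = dict(beta1=hparams.optimizer_adam_beta1,
                       beta2=hparams.optimizer_adam_beta2,
                       epsilon=hparams.optimizer_adam_epsilon)
    factories = {
        "Adam": lambda: tf.contrib.opt.LazyAdamOptimizer(lr, **adam_kwargs),
        "TrueAdam": lambda: tf.train.AdamOptimizer(lr, **adam_kwargs),
        "MultistepAdam": lambda: multistep_optimizer.MultistepAdamOptimizer(
            lr, n=hparams.optimizer_multistep_accumulate_steps, **adam_kwargs),
        "Momentum": lambda: tf.train.MomentumOptimizer(
            lr, momentum=hparams.optimizer_momentum_momentum,
            use_nesterov=hparams.optimizer_momentum_nesterov),
    }
    if optimizer_name in factories:
        return factories[optimizer_name]()
    return tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](lr)
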
Example 6
    def testMultistep(self):
        dtype = tf.float32
        beta1 = 0.2
        beta2 = 0.99
        alpha = 10.0
        grads0_np_lst = [
            np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype),
            np.array([0.2, -0.1], dtype=dtype.as_numpy_dtype),
            np.array([0.3, 0.1], dtype=dtype.as_numpy_dtype),
            np.array([0.4, -0.1], dtype=dtype.as_numpy_dtype)
        ]
        grads1_np_lst = [
            np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype),
            np.array([0.02, 0.02], dtype=dtype.as_numpy_dtype),
            np.array([-0.04, 0.04], dtype=dtype.as_numpy_dtype),
            np.array([-0.04, 0.06], dtype=dtype.as_numpy_dtype)
        ]
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        # Test accumulating gradients for n=1..4 steps
        for n in range(1, 5):
            with tf.Graph().as_default():
                with tf.Session():
                    singlestep_var0 = tf.Variable(var0_np)
                    singlestep_var1 = tf.Variable(var1_np)

                    multistep_var0 = tf.Variable(var0_np)
                    multistep_var1 = tf.Variable(var1_np)

                    singlestep_opt = tf.train.AdamOptimizer(
                        beta1=beta1, beta2=beta2, learning_rate=alpha)
                    multistep_opt = multistep_optimizer.MultistepAdamOptimizer(
                        n=n, beta1=beta1, beta2=beta2, learning_rate=alpha)

                    singlestep_update = singlestep_opt.apply_gradients([
                        (tf.constant(sum(grads0_np_lst[:n]) / n),
                         singlestep_var0),
                        (tf.constant(sum(grads1_np_lst[:n]) / n),
                         singlestep_var1)
                    ])
                    multistep_updates = [
                        multistep_opt.apply_gradients([
                            (tf.constant(g0), multistep_var0),
                            (tf.constant(g1), multistep_var1)
                        ]) for g0, g1 in zip(grads0_np_lst, grads1_np_lst)
                    ][:n]

                    self.evaluate(tf.global_variables_initializer())
                    (singlestep_beta1_power, singlestep_beta2_power
                     ) = singlestep_opt._get_beta_accumulators()
                    (multistep_beta1_power, multistep_beta2_power
                     ) = multistep_opt._get_beta_accumulators()

                    # Run 3 steps of Adam
                    for _ in range(1, 4):
                        self.evaluate(singlestep_update)
                        for multistep_update in multistep_updates:
                            self.evaluate(multistep_update)

                        self.assertAllCloseAccordingToType(
                            self.evaluate(singlestep_beta1_power),
                            self.evaluate(multistep_beta1_power))
                        self.assertAllCloseAccordingToType(
                            self.evaluate(singlestep_beta2_power),
                            self.evaluate(multistep_beta2_power))
                        # Validate updated params
                        self.assertAllCloseAccordingToType(
                            self.evaluate(singlestep_var0),
                            self.evaluate(multistep_var0))
                        self.assertAllCloseAccordingToType(
                            self.evaluate(singlestep_var1),
                            self.evaluate(multistep_var1))
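
The test verifies the defining property of MultistepAdamOptimizer: applying it to n individual gradients produces the same parameters and beta-power accumulators as a plain AdamOptimizer applied once to the mean of those n gradients. A conceptual sketch of that accumulate-then-average behaviour (this illustrates the idea, not the library's implementation):

import numpy as np

def accumulating_update(apply_fn, grads, n):
    """Call apply_fn once with the mean of every block of n gradients."""
    buf = []
    for g in grads:
        buf.append(g)
        if len(buf) == n:
            apply_fn(np.mean(buf, axis=0))  # one effective optimizer step
            buf = []

applied = []
accumulating_update(applied.append,
                    [np.array([0.1, 0.1]), np.array([0.2, -0.1])], n=2)
assert np.allclose(applied, [[0.15, 0.0]])  # one step with the mean gradient
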
Example 7
  def __init__(self, optimizer_name, lr, hparams, use_tpu=False):  # pylint: disable=super-init-not-called
    tf.logging.info("Using optimizer %s", optimizer_name)

    mlperf_log.transformer_print(key=mlperf_log.OPT_NAME,
                                 value=optimizer_name,
                                 hparams=hparams)
    mlperf_log.transformer_print(
        key=mlperf_log.OPT_HP_ADAM_BETA1, value=hparams.optimizer_adam_beta1,
        hparams=hparams)
    mlperf_log.transformer_print(
        key=mlperf_log.OPT_HP_ADAM_BETA2, value=hparams.optimizer_adam_beta2,
        hparams=hparams)
    mlperf_log.transformer_print(
        key=mlperf_log.OPT_HP_ADAM_EPSILON,
        value=hparams.optimizer_adam_epsilon,
        hparams=hparams)

    if optimizer_name == "Adam":
      # We change the default epsilon for Adam.
      # Using LazyAdam as it's much faster for large vocabulary embeddings.
      self._opt = tf.contrib.opt.LazyAdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon)
    elif optimizer_name == "MultistepAdam":
      self._opt = multistep_optimizer.MultistepAdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon,
          n=hparams.optimizer_multistep_accumulate_steps)
    elif optimizer_name == "Momentum":
      self._opt = tf.train.MomentumOptimizer(
          lr,
          momentum=hparams.optimizer_momentum_momentum,
          use_nesterov=hparams.optimizer_momentum_nesterov)
    elif optimizer_name == "YellowFin":
      self._opt = yellowfin.YellowFinOptimizer(
          learning_rate=lr, momentum=hparams.optimizer_momentum_momentum)
    elif optimizer_name == "TrueAdam":
      self._opt = tf.train.AdamOptimizer(
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon)
    elif optimizer_name == "AdamW":
      # OpenAI GPT used weight decay.
      # Given the internals of AdamW, a weight decay proportional to the
      # learning rate is chosen to match the OpenAI implementation.
      # The weight decay update to each parameter is applied before the
      # Adam gradient computation, which differs from what is described
      # in the paper and in the OpenAI implementation:
      # https://arxiv.org/pdf/1711.05101.pdf
      self._opt = tf.contrib.opt.AdamWOptimizer(
          0.01*lr,
          lr,
          beta1=hparams.optimizer_adam_beta1,
          beta2=hparams.optimizer_adam_beta2,
          epsilon=hparams.optimizer_adam_epsilon)
    elif optimizer_name == "Adafactor":
      self._opt = adafactor.adafactor_optimizer_from_hparams(hparams, lr)
    else:
      self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](lr)
    if _mixed_precision_is_enabled(hparams):
      if not hparams.mixed_precision_optimizer_loss_scaler:
        tf.logging.warning("Using mixed precision without a loss scaler will "
                           "likely cause numerical errors.")
      elif hparams.mixed_precision_optimizer_loss_scaler != "exponential":
        raise ValueError("Mixed precision training only supports the "
                         "exponential loss scaler")
      else:
        tf.logging.info("Using Exponential Update Loss Scaler")
        manager = tf.contrib.mixed_precision.ExponentialUpdateLossScaleManager(
            init_loss_scale=2**15,
            incr_every_n_steps=2000,
            decr_every_n_nan_or_inf=2,
            incr_ratio=2,
            decr_ratio=0.5)
        self._opt = LossScaleOptimizer(self._opt, manager)

    self._zero_grads = hparams.optimizer_zero_grads
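
In the mixed-precision branch, the loss is multiplied by a scale factor before backpropagation so that small float16 gradients do not underflow, the gradients are divided by the same factor before being applied, and steps with non-finite gradients are skipped while the scale is reduced. A conceptual sketch of the exponential schedule configured above (init 2**15, grow by 2x after 2000 clean steps, shrink by 0.5x after 2 NaN/Inf steps); this illustrates the idea and is not the tf.contrib.mixed_precision implementation:

class ExponentialLossScale(object):
    def __init__(self, init_scale=2**15, incr_every_n=2000,
                 decr_after_n_bad=2, incr_ratio=2.0, decr_ratio=0.5):
        self.scale = float(init_scale)
        self.incr_every_n = incr_every_n
        self.decr_after_n_bad = decr_after_n_bad
        self.incr_ratio = incr_ratio
        self.decr_ratio = decr_ratio
        self.good_steps = 0
        self.bad_steps = 0

    def update(self, grads_finite):
        """Returns True if the (unscaled) gradients should be applied."""
        if not grads_finite:
            self.good_steps = 0
            self.bad_steps += 1
            if self.bad_steps >= self.decr_after_n_bad:
                self.scale *= self.decr_ratio   # overflow: shrink the scale
                self.bad_steps = 0
            return False                        # skip this optimizer step
        self.bad_steps = 0
        self.good_steps += 1
        if self.good_steps >= self.incr_every_n:
            self.scale *= self.incr_ratio       # stable: grow the scale
            self.good_steps = 0
        return True

scaler = ExponentialLossScale()
scaler.update(grads_finite=False)
scaler.update(grads_finite=False)  # second consecutive overflow halves the scale
assert scaler.scale == 2 ** 14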