Code Example #1
File: optimizer_v2_test.py  Project: ttigong/keras
  def testConfigWithLearningRateDecay(self):
    with test_utils.use_gpu():
      var0 = tf.Variable([[1.0], [2.0]], dtype=tf.float32)
      for decay_schedule in [
          learning_rate_schedule.InverseTimeDecay(
              0.5, decay_steps=1.0, decay_rate=0.1),
          learning_rate_schedule.PiecewiseConstantDecay(
              [5], [1., .5])
      ]:
        step = 10
        opt = gradient_descent.SGD(decay_schedule)
        config = opt.get_config()
        opt2 = gradient_descent.SGD.from_config(config)
        # Assert both produce equal float values.
        self.assertAllEqual(
            decay_schedule(step),
            opt._get_hyper('learning_rate')(step))
        self.assertAllEqual(
            decay_schedule(step),
            opt2._get_hyper('learning_rate')(step))
        loss = lambda: 3 * var0
        # The learning rate variable is created when calling minimize().
        opt.minimize(loss, [var0])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        config = opt.get_config()
        opt3 = gradient_descent.SGD.from_config(config)
        self.assertAllEqual(
            self.evaluate(opt._get_hyper('learning_rate')(step)),
            opt3._get_hyper('learning_rate')(step))
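The round trip exercised above can be reproduced without the Keras-internal helpers (`test_utils`, `_get_hyper`). Below is a minimal sketch using the public `tf.keras` aliases; the exact module paths are an assumption and may vary across TF versions:

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay([5], [1.0, 0.5])
opt = tf.keras.optimizers.SGD(learning_rate=schedule)

# get_config() serializes the schedule along with the optimizer, so a
# schedule rebuilt from the config yields the same rate at every step.
config = opt.get_config()
restored_schedule = tf.keras.optimizers.schedules.deserialize(
    config['learning_rate'])
step = 10
assert float(schedule(step)) == float(restored_schedule(step))  # both 0.5
```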
Code Example #2
  def testPiecewiseConstantEdgeCases(self, serialize):
    # Test casting boundaries from int32 to int64.
    x_int64 = tf.Variable(0, dtype=tf.int64)
    boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
    decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
        boundaries, values)
    decayed_lr = _maybe_serialized(decayed_lr, serialize)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6)
    self.evaluate(x_int64.assign(1))
    self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.4, 1e-6)
    self.evaluate(x_int64.assign(2))
    self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.5, 1e-6)
    self.evaluate(x_int64.assign(3))
    self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.6, 1e-6)
    self.evaluate(x_int64.assign(4))
    self.assertAllClose(self.evaluate(decayed_lr(x_int64)), 0.7, 1e-6)
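The same edge case can be seen with the public API: Python int boundaries become int32 tensors, while the step below is int64, and the schedule is expected to cast the boundaries up rather than raise. A small sketch (assumes eager TF 2.x and the public aliases):

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    [1, 2, 3], [0.4, 0.5, 0.6, 0.7])
step = tf.Variable(4, dtype=tf.int64)  # int64 step vs. int32 boundaries
print(float(schedule(step)))  # 0.7, the value past the last boundary
```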
Code Example #3
  def testPiecewiseConstant(self, serialize):
    x = tf.Variable(-999)
    decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
        [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
    decayed_lr = _maybe_serialized(decayed_lr, serialize)

    self.evaluate(tf.compat.v1.global_variables_initializer())

    self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6)
    self.evaluate(x.assign(100))
    self.assertAllClose(self.evaluate(decayed_lr(x)), 1.0, 1e-6)
    self.evaluate(x.assign(105))
    self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6)
    self.evaluate(x.assign(110))
    self.assertAllClose(self.evaluate(decayed_lr(x)), 0.1, 1e-6)
    self.evaluate(x.assign(120))
    self.assertAllClose(self.evaluate(decayed_lr(x)), 0.01, 1e-6)
    self.evaluate(x.assign(999))
    self.assertAllClose(self.evaluate(decayed_lr(x)), 0.001, 1e-6)
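This test pins down the boundary semantics: `values[i]` still applies when the step is exactly `boundaries[i]`, i.e. each boundary belongs to the interval on its left. A compact standalone check (a sketch, assuming eager TF 2.x and the public aliases):

```python
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
# The boundary step itself still uses the earlier value.
for step, expected in [(100, 1.0), (101, 0.1), (120, 0.01), (121, 0.001)]:
    assert abs(float(schedule(step)) - expected) < 1e-6
```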
Code Example #4
  def testPiecewiseFunction(self, serialize):
    if not tf.executing_eagerly():
      self.skipTest("Run on eager mode only.")

    del serialize
    v = tf.Variable(1.)
    def loss_fn():
      return v * v
    learning_rate = learning_rate_schedule.PiecewiseConstantDecay(
        [1.], [1., 0.1])
    opt = gradient_descent.SGD(learning_rate=learning_rate)

    @tf.function
    def minimize():
      with tf.GradientTape() as tape:
        loss = loss_fn()
      g = tape.gradient(loss, [v])
      opt.apply_gradients(list(zip(g, [v])))

    minimize()
    # lr is 1.0 at iteration 0 (step <= boundary 1.), and d(v*v)/dv = 2v = 2.0,
    # so SGD updates v to 1.0 - 1.0 * 2.0 = -1.0.
    self.assertAllEqual(v.read_value(), -1.0)
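To make the expected `-1.0` concrete, the schedule can be probed directly: the optimizer queries it at its iteration counter, which starts at 0, on or before the boundary `1.`, so the full initial rate applies. A quick sketch with the public alias (an assumption; the test itself uses the internal module):

```python
import tensorflow as tf

lr = tf.keras.optimizers.schedules.PiecewiseConstantDecay([1.], [1., 0.1])
# Iterations 0 and 1 are on or before the boundary, so the rate is 1.0;
# from iteration 2 onward it drops to 0.1.
print(float(lr(0.)), float(lr(1.)), float(lr(2.)))  # 1.0 1.0 0.1
```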
Code Example #5
# Imports needed to run this snippet (module paths as in the Keras source
# tree; an assumption that may vary across versions):
import functools

import tensorflow.compat.v2 as tf

from keras.optimizer_v2 import learning_rate_schedule


def piecewise_constant(x, boundaries, values, name=None):
  """Piecewise constant from boundaries and interval values.

  Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
    for the next 10000 steps, and 0.1 for any additional steps.

  ```python
  global_step = tf.Variable(0, trainable=False)
  boundaries = [100000, 110000]
  values = [1.0, 0.5, 0.1]
  learning_rate = tf.compat.v1.train.piecewise_constant(
      global_step, boundaries, values)

  # Later, whenever we perform an optimization step, we increment global_step.
  ```

  Args:
    x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
      `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
    boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
      increasing entries, and with all elements having the same type as `x`.
    values: A list of `Tensor`s or `float`s or `int`s that specifies the values
      for the intervals defined by `boundaries`. It should have one more element
      than `boundaries`, and all elements should have the same type.
    name: A string. Optional name of the operation. Defaults to
      'PiecewiseConstant'.

  Returns:
    A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
    `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
    and `values[-1]` when `x > boundaries[-1]`.

  Raises:
    ValueError: if the types of `x` and `boundaries` do not match, if the
      types of the `values` elements do not match, or if the number of
      elements in the lists does not match.

  @compatibility(eager)
  When eager execution is enabled, this function returns a function which in
  turn returns the decayed learning rate Tensor. This can be useful for changing
  the learning rate value across different invocations of optimizer functions.
  @end_compatibility
  """
  boundaries = tf.nest.map_structure(tf.convert_to_tensor,
                                     tf.nest.flatten(boundaries))
  values = tf.nest.map_structure(tf.convert_to_tensor,
                                 tf.nest.flatten(values))
  x_recomp = tf.convert_to_tensor(x)
  # Avoid explicit conversion to x's dtype. This could result in faulty
  # comparisons, for example if floats are converted to integers.
  for i, b in enumerate(boundaries):
    if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
      # We can promote int32 boundaries to int64 without loss of precision.
      # This covers the most common case where the user passes in boundaries
      # as an array of Python integers.
      if (b.dtype.base_dtype == tf.int32
          and x_recomp.dtype.base_dtype == tf.int64):
        b = tf.cast(b, x_recomp.dtype.base_dtype)
        boundaries[i] = b
      else:
        raise ValueError(
            f"`boundaries` ({b.dtype.base_dtype}) must have the same dtype as "
            f"x ({x_recomp.dtype.base_dtype}).")
  for v in values[1:]:
    if v.dtype.base_dtype != values[0].dtype.base_dtype:
      raise ValueError(
          f"`values` must have elements all with the same dtype "
          f"({values[0].dtype.base_dtype} vs {v.dtype.base_dtype}).")
  decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
      boundaries, values, name=name)
  if not tf.executing_eagerly():
    # In graph mode, return the decayed learning rate tensor directly.
    decayed_lr = decayed_lr(x)
  else:
    # In eager mode, return a callable that recomputes the rate from the
    # current value of `x` on every invocation.
    decayed_lr = functools.partial(decayed_lr, x)
  return decayed_lr
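As the `@compatibility(eager)` note says, under eager execution this wrapper returns a callable that re-reads `x` on each call rather than a fixed tensor. A short usage sketch via the public `tf.compat.v1.train.piecewise_constant` alias named in the docstring:

```python
import tensorflow as tf

step = tf.Variable(0, trainable=False)
lr_fn = tf.compat.v1.train.piecewise_constant(
    step, boundaries=[100000, 110000], values=[1.0, 0.5, 0.1])

print(float(lr_fn()))   # 1.0 while step <= 100000
step.assign(100001)
print(float(lr_fn()))   # 0.5 once past the first boundary
```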