Example No. 1
def _create_learning_rate(learning_rate_config, global_step=None):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_step: A variable representing the current step.
      If None, defaults to tf.train.get_or_create_global_step()

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  if global_step is None:
    global_step = tf.train.get_or_create_global_step()
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
                                name='learning_rate')

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = learning_schedules.exponential_decay_with_burnin(
        global_step,
        config.initial_learning_rate,
        config.decay_steps,
        config.decay_factor,
        burnin_learning_rate=config.burnin_learning_rate,
        burnin_steps=config.burnin_steps,
        min_learning_rate=config.min_learning_rate,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        global_step, learning_rate_step_boundaries,
        learning_rate_sequence, config.warmup)

  if learning_rate_type == 'cosine_decay_learning_rate':
    config = learning_rate_config.cosine_decay_learning_rate
    learning_rate = learning_schedules.cosine_decay_with_warmup(
        global_step,
        config.learning_rate_base,
        config.total_steps,
        config.warmup_learning_rate,
        config.warmup_steps,
        config.hold_base_rate_steps)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  return learning_rate
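A minimal usage sketch for the function above. The proto import path and field names follow the TF1-era Object Detection API and are assumptions; adjust them to your checkout.

# Sketch: build a constant learning rate from a LearningRate proto
# (assumed import path; not part of the code above).
import tensorflow as tf
from object_detection.protos import optimizer_pb2

learning_rate_config = optimizer_pb2.LearningRate()
learning_rate_config.constant_learning_rate.learning_rate = 0.01

lr = _create_learning_rate(learning_rate_config)
with tf.Session() as sess:
  # The default global_step variable is created up front, so it must be
  # initialized even for the constant schedule.
  sess.run(tf.global_variables_initializer())
  print(sess.run(lr))  # 0.01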
Example No. 2
 def graph_fn(global_step):
     boundaries = [2, 3, 7]
     rates = [1.0, 2.0, 3.0, 4.0]
     learning_rate = learning_schedules.manual_stepping(
         global_step, boundaries, rates)
     assert learning_rate.op.name.endswith('learning_rate')
     return (learning_rate, )
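For intuition, a plain-Python sketch of the piecewise-constant schedule that manual_stepping computes; manual_stepping_py is a hypothetical helper, not part of learning_schedules.

# Sketch: rates[0] applies to steps in [0, boundaries[0]), rates[i] to
# [boundaries[i-1], boundaries[i]), and the last rate thereafter.
def manual_stepping_py(step, boundaries, rates):
  for boundary, rate in zip(boundaries, rates):
    if step < boundary:
      return rate
  return rates[-1]

assert [manual_stepping_py(s, [2, 3, 7], [1.0, 2.0, 3.0, 4.0])
        for s in range(10)] == [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]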
Example No. 3
 def graph_fn(global_step):
   boundaries = [4, 6, 8]
   rates = [0.02, 0.10, 0.01, 0.001]
   learning_rate = learning_schedules.manual_stepping(
       global_step, boundaries, rates, warmup=True)
   assert learning_rate.op.name.endswith('learning_rate')
   return (learning_rate,)
Example No. 4
 def graph_fn(global_step):
   boundaries = [4, 6, 8]
   rates = [0.02, 0.10, 0.01, 0.001]
   learning_rate = learning_schedules.manual_stepping(
       global_step, boundaries, rates, warmup=True)
   assert learning_rate.op.name.endswith('learning_rate')
   return (learning_rate,)
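With warmup=True, manual_stepping additionally ramps the rate from rates[0] toward rates[1] over the first boundaries[0] steps before the stepwise schedule takes over. A rough pure-Python sketch of that ramp (approximate; the exact interpolation lives in learning_schedules):

# Sketch: linear warmup ramp for steps in [0, boundaries[0]).
def warmup_rate(step, boundaries, rates):
  slope = (rates[1] - rates[0]) / boundaries[0]
  return rates[0] + slope * step

print(warmup_rate(2, [4, 6, 8], [0.02, 0.10, 0.01, 0.001]))  # ~0.06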
Example No. 5
 def graph_fn(global_step):
   boundaries = [2, 3, 7]
   rates = [1.0, 2.0, 3.0, 4.0]
   learning_rate = learning_schedules.manual_stepping(
       global_step, boundaries, rates)
   assert learning_rate.op.name.endswith('learning_rate')
   return (learning_rate,)
Example No. 6
def _create_learning_rate(learning_rate_config):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
                                name='learning_rate')

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = learning_schedules.exponential_decay_with_burnin(
        tf.train.get_or_create_global_step(),
        config.initial_learning_rate,
        config.decay_steps,
        config.decay_factor,
        burnin_learning_rate=config.burnin_learning_rate,
        burnin_steps=config.burnin_steps,
        min_learning_rate=config.min_learning_rate,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        tf.train.get_or_create_global_step(), learning_rate_step_boundaries,
        learning_rate_sequence, config.warmup)

  if learning_rate_type == 'cosine_decay_learning_rate':
    config = learning_rate_config.cosine_decay_learning_rate
    learning_rate = learning_schedules.cosine_decay_with_warmup(
        tf.train.get_or_create_global_step(),
        config.learning_rate_base,
        config.total_steps,
        config.warmup_learning_rate,
        config.warmup_steps,
        config.hold_base_rate_steps)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  return learning_rate
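A usage sketch for the exponential-decay branch of this variant, reusing the assumed optimizer_pb2 import from the sketch under Example No. 1; values are illustrative.

# Sketch: exponential decay with burn-in, driven entirely by the config.
config = optimizer_pb2.LearningRate()
decay = config.exponential_decay_learning_rate
decay.initial_learning_rate = 0.004
decay.decay_steps = 800000
decay.decay_factor = 0.95
lr = _create_learning_rate(config)  # a step-dependent tf.Tensor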
Example No. 7
def _create_learning_rate(learning_rate_config, global_summaries):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_summaries: A set to attach learning rate summary to.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = config.learning_rate

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = tf.train.exponential_decay(
        config.initial_learning_rate,
        tf.train.get_or_create_global_step(),
        config.decay_steps,
        config.decay_factor,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        tf.train.get_or_create_global_step(), learning_rate_step_boundaries,
        learning_rate_sequence)

  if learning_rate_type == 'cosine_decay_learning_rate':
    config = learning_rate_config.cosine_decay_learning_rate
    learning_rate = learning_schedules.cosine_decay_with_warmup(
        tf.train.get_or_create_global_step(),
        config.learning_rate_base,
        config.total_steps,
        config.warmup_learning_rate,
        config.warmup_steps)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  global_summaries.add(tf.summary.scalar('Learning_Rate', learning_rate))
  return learning_rate
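This variant also records the rate as a scalar summary. A hedged sketch of the calling side (names assumed):

# Sketch: collect the learning-rate summary and merge it for logging.
global_summaries = set()
lr = _create_learning_rate(learning_rate_config, global_summaries)
summary_op = tf.summary.merge(list(global_summaries))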
Example No. 8
 def testManualStepping(self):
   global_step = tf.placeholder(tf.int64, [])
   boundaries = [2, 3, 7]
   rates = [1.0, 2.0, 3.0, 4.0]
   exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
   learning_rate = learning_schedules.manual_stepping(global_step, boundaries,
                                                      rates)
   with self.test_session() as sess:
     output_rates = []
     for input_global_step in range(10):
       output_rate = sess.run(learning_rate,
                              feed_dict={global_step: input_global_step})
       output_rates.append(output_rate)
     self.assertAllClose(output_rates, exp_rates)
Example No. 9
 def testManualStepping(self):
     global_step = tf.placeholder(tf.int64, [])
     boundaries = [2, 3, 7]
     rates = [1.0, 2.0, 3.0, 4.0]
     exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]
     learning_rate = learning_schedules.manual_stepping(
         global_step, boundaries, rates)
     with self.test_session() as sess:
         output_rates = []
         for input_global_step in range(10):
             output_rate = sess.run(
                 learning_rate, feed_dict={global_step: input_global_step})
             output_rates.append(output_rate)
         self.assertAllClose(output_rates, exp_rates)
Example No. 10
def _create_learning_rate(learning_rate_config, global_step=None):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_step: A variable representing the current step.
      If None, defaults to tf.train.get_or_create_global_step()

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  if global_step is None:
    global_step = tf.train.get_or_create_global_step()
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
                                name='learning_rate')

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = learning_schedules.exponential_decay_with_burnin(
        global_step,
        config.initial_learning_rate,
        config.decay_steps,
        config.decay_factor,
        burnin_learning_rate=config.burnin_learning_rate,
        burnin_steps=config.burnin_steps,
        min_learning_rate=config.min_learning_rate,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        global_step, learning_rate_step_boundaries,
        learning_rate_sequence, config.warmup)

  if learning_rate_type == 'cosine_decay_learning_rate':
    config = learning_rate_config.cosine_decay_learning_rate
    learning_rate = learning_schedules.cosine_decay_with_warmup(
        global_step,
        config.learning_rate_base,
        config.total_steps,
        config.warmup_learning_rate,
        config.warmup_steps,
        config.hold_base_rate_steps)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  return learning_rate
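Because this copy accepts an explicit global_step, a placeholder can drive the schedule directly, mirroring the tests above (sketch):

# Sketch: evaluate the schedule at arbitrary steps via a placeholder.
global_step = tf.placeholder(tf.int64, [])
lr = _create_learning_rate(learning_rate_config, global_step=global_step)
# sess.run(lr, feed_dict={global_step: 5}) then yields the rate at step 5.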
Example No. 11
def _create_learning_rate(learning_rate_config, global_summaries):
  """Create optimizer learning rate based on config.

  Args:
    learning_rate_config: A LearningRate proto message.
    global_summaries: A set to attach learning rate summary to.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  learning_rate = None
  learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
  if learning_rate_type == 'constant_learning_rate':
    config = learning_rate_config.constant_learning_rate
    learning_rate = config.learning_rate

  if learning_rate_type == 'exponential_decay_learning_rate':
    config = learning_rate_config.exponential_decay_learning_rate
    learning_rate = tf.train.exponential_decay(
        config.initial_learning_rate,
        slim.get_or_create_global_step(),
        config.decay_steps,
        config.decay_factor,
        staircase=config.staircase)

  if learning_rate_type == 'manual_step_learning_rate':
    config = learning_rate_config.manual_step_learning_rate
    if not config.schedule:
      raise ValueError('Empty learning rate schedule.')
    learning_rate_step_boundaries = [x.step for x in config.schedule]
    learning_rate_sequence = [config.initial_learning_rate]
    learning_rate_sequence += [x.learning_rate for x in config.schedule]
    learning_rate = learning_schedules.manual_stepping(
        slim.get_or_create_global_step(), learning_rate_step_boundaries,
        learning_rate_sequence)

  if learning_rate is None:
    raise ValueError('Learning_rate %s not supported.' % learning_rate_type)

  global_summaries.add(tf.summary.scalar('Learning Rate', learning_rate))
  return learning_rate
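Note that this older variant omits the warmup and cosine-decay options of the fuller copies above. Configuring the cosine-decay branch there looks roughly like this (field names from the code; values illustrative):

# Sketch: cosine decay with warmup via the config proto.
config = optimizer_pb2.LearningRate()
cosine = config.cosine_decay_learning_rate
cosine.learning_rate_base = 0.04
cosine.total_steps = 100000
cosine.warmup_learning_rate = 0.004
cosine.warmup_steps = 1000
lr = _create_learning_rate(config)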
Example No. 12
 def graph_fn(global_step):
     boundaries = []
     rates = [0.01]
     learning_rate = learning_schedules.manual_stepping(
         global_step, boundaries, rates)
     return (learning_rate, )
Example No. 13
 def graph_fn(global_step):
     boundaries = [2, 3, 7]
     rates = [1.0, 2.0, 3.0, 4.0]
     learning_rate = learning_schedules.manual_stepping(
         global_step, boundaries, rates)
     return (learning_rate, )
Example No. 14
 def graph_fn(global_step):
   boundaries = []
   rates = [0.01]
   learning_rate = learning_schedules.manual_stepping(
       global_step, boundaries, rates)
   return (learning_rate,)
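With an empty boundaries list, manual_stepping degenerates to a constant schedule, which is what this last example relies on. A quick sanity-check sketch (assumes the tf and learning_schedules imports used throughout):

# Sketch: no boundaries means rates[0] applies at every step.
with tf.Session() as sess:
  step = tf.constant(5, dtype=tf.int64)
  lr = learning_schedules.manual_stepping(step, [], [0.01])
  print(sess.run(lr))  # 0.01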