Example #1: raise ValueError on an empty learning rate config
 def testRaiseErrorOnEmptyLearningRate(self):
   learning_rate_text_proto = """
   """
   learning_rate_proto = optimizer_pb2.LearningRate()
   text_format.Merge(learning_rate_text_proto, learning_rate_proto)
   with self.assertRaises(ValueError):
     optimizer_builder._create_learning_rate(learning_rate_proto)
Example #2: build a constant learning rate
 def testBuildConstantLearningRate(self):
   learning_rate_text_proto = """
     constant_learning_rate {
       learning_rate: 0.004
     }
   """
   learning_rate_proto = optimizer_pb2.LearningRate()
   text_format.Merge(learning_rate_text_proto, learning_rate_proto)
   learning_rate = optimizer_builder._create_learning_rate(
       learning_rate_proto)
   self.assertTrue(learning_rate.op.name.endswith('learning_rate'))
   with self.test_session():
     learning_rate_out = learning_rate.eval()
   self.assertAlmostEqual(learning_rate_out, 0.004)
Example #3: build a cosine-decay learning rate with warmup
 def testBuildCosineDecayLearningRate(self):
   learning_rate_text_proto = """
     cosine_decay_learning_rate {
       learning_rate_base: 0.002
       total_steps: 20000
       warmup_learning_rate: 0.0001
       warmup_steps: 1000
       hold_base_rate_steps: 20000
     }
   """
   learning_rate_proto = optimizer_pb2.LearningRate()
   text_format.Merge(learning_rate_text_proto, learning_rate_proto)
   learning_rate = optimizer_builder._create_learning_rate(
       learning_rate_proto)
   self.assertIsInstance(learning_rate, tf.Tensor)
Example #4: build an exponential-decay learning rate
 def testBuildExponentialDecayLearningRate(self):
   learning_rate_text_proto = """
     exponential_decay_learning_rate {
       initial_learning_rate: 0.004
       decay_steps: 99999
       decay_factor: 0.85
       staircase: false
     }
   """
   learning_rate_proto = optimizer_pb2.LearningRate()
   text_format.Merge(learning_rate_text_proto, learning_rate_proto)
   learning_rate = optimizer_builder._create_learning_rate(
       learning_rate_proto)
   self.assertTrue(learning_rate.op.name.endswith('learning_rate'))
   self.assertIsInstance(learning_rate, tf.Tensor)
Example #5: build a manual-step learning rate
 def testBuildManualStepLearningRate(self):
   learning_rate_text_proto = """
     manual_step_learning_rate {
       initial_learning_rate: 0.002
       schedule {
         step: 100
         learning_rate: 0.006
       }
       schedule {
         step: 90000
         learning_rate: 0.00006
       }
       warmup: true
     }
   """
   learning_rate_proto = optimizer_pb2.LearningRate()
   text_format.Merge(learning_rate_text_proto, learning_rate_proto)
   learning_rate = optimizer_builder._create_learning_rate(
       learning_rate_proto)
   self.assertIsInstance(learning_rate, tf.Tensor)
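
The methods above belong to a single tf.test.TestCase and are not runnable on their own. Below is a minimal scaffold for running them, assuming the TensorFlow Object Detection API package layout (object_detection.builders.optimizer_builder and object_detection.protos.optimizer_pb2); the import paths and the class name are assumptions, so adjust them to your checkout.

import tensorflow as tf
from google.protobuf import text_format

# Assumed import paths from the TensorFlow Object Detection API;
# change them if your repository lays the packages out differently.
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2


class LearningRateBuilderTest(tf.test.TestCase):
  """Hosts the test methods shown in the examples above."""

  def testBuildConstantLearningRate(self):
    # Example #2, reproduced here to show how a method fits into the scaffold.
    learning_rate_text_proto = """
      constant_learning_rate {
        learning_rate: 0.004
      }
    """
    learning_rate_proto = optimizer_pb2.LearningRate()
    text_format.Merge(learning_rate_text_proto, learning_rate_proto)
    learning_rate = optimizer_builder._create_learning_rate(
        learning_rate_proto)
    self.assertTrue(learning_rate.op.name.endswith('learning_rate'))
    with self.test_session():
      self.assertAlmostEqual(learning_rate.eval(), 0.004)

  # The remaining example methods can be pasted into this class unchanged.


if __name__ == '__main__':
  tf.test.main()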