# Shared imports for the snippets below (TF 1.x; the object_detection package
# from the TensorFlow models repo is assumed to be importable).
import tensorflow as tf

from google.protobuf import text_format
from tensorflow.contrib import opt as contrib_opt

from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2


def testBuildEmptyOptimizer(self):
  optimizer_text_proto = """
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  with self.assertRaises(ValueError):
    optimizer_builder.build(optimizer_proto)
# Variant of the same test from an older Object Detection API, where build()
# also takes a set used to collect global summaries.
def testBuildEmptyOptimizer(self):
  optimizer_text_proto = """
  """
  global_summaries = set([])
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  with self.assertRaises(ValueError):
    optimizer_builder.build(optimizer_proto, global_summaries)
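
# Both tests above fail for the same reason: the Optimizer message keeps its
# concrete optimizer in a proto oneof, and an empty message has nothing set.
# A minimal sketch of that dispatch pattern using the standard WhichOneof API,
# assuming the oneof is named 'optimizer'; the real builder differs in detail.
def _build_dispatch_sketch(optimizer_config):
  optimizer_type = optimizer_config.WhichOneof('optimizer')  # None if unset
  if optimizer_type is None:
    raise ValueError('Optimizer %s not supported.' % optimizer_type)
  return optimizer_type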
def testBuildAdamOptimizer(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: false
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer, _ = optimizer_builder.build(optimizer_proto)
  self.assertIsInstance(optimizer, tf.train.AdamOptimizer)
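
# For context: the built object is a regular tf.train.Optimizer and can drive
# a training step directly. A self-contained TF 1.x usage sketch; the variable
# and loss below are dummies invented for illustration.
_adam_proto = optimizer_pb2.Optimizer()
text_format.Merge("""
  adam_optimizer: {
    learning_rate: { constant_learning_rate { learning_rate: 0.002 } }
  }
  use_moving_average: false
""", _adam_proto)
_adam_optimizer, _ = optimizer_builder.build(_adam_proto)
_var = tf.Variable(1.0)                      # dummy variable
_loss = tf.square(_var)                      # dummy scalar loss
_train_op = _adam_optimizer.minimize(_loss)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(_train_op)                        # one Adam step at learning rate 0.002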
def testBuildMovingAverageOptimizer(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: True
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer, _ = optimizer_builder.build(optimizer_proto)
  self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
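
# Practical note on MovingAverageOptimizer: checkpoints should be written with
# its swapping_saver() so the averaged weights are stored instead of the raw
# ones. A self-contained TF 1.x sketch; the variable and loss are dummies.
_ma_proto = optimizer_pb2.Optimizer()
text_format.Merge("""
  adam_optimizer: {
    learning_rate: { constant_learning_rate { learning_rate: 0.002 } }
  }
  use_moving_average: true
""", _ma_proto)
_ma_optimizer, _ = optimizer_builder.build(_ma_proto)
_w = tf.Variable(2.0)                        # dummy variable
_ma_train_op = _ma_optimizer.minimize(tf.square(_w))
_ma_saver = _ma_optimizer.swapping_saver()   # saves averaged weights, not raw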
# In this variant of the builder, moving averaging is unsupported, so the same
# config that succeeds above is expected to raise.
def testMovingAverageOptimizerUnsupported(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: True
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  with self.assertRaises(ValueError):
    optimizer_builder.build(optimizer_proto)
def testBuildMomentumOptimizer(self):
  optimizer_text_proto = """
    momentum_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.001
        }
      }
      momentum_optimizer_value: 0.99
    }
    use_moving_average: false
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer, _ = optimizer_builder.build(optimizer_proto)
  self.assertIsInstance(optimizer, tf.train.MomentumOptimizer)
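
# Modulo the builder's plumbing, the momentum config above is equivalent to
# constructing the optimizer directly:
_momentum_optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
                                                 momentum=0.99)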
# Variant from an older Object Detection API: build() takes a global_summaries
# set and returns the optimizer alone rather than an (optimizer, vars) pair.
def testBuildMovingAverageOptimizer(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: True
  """
  global_summaries = set([])
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
  self.assertIsInstance(optimizer, tf.contrib.opt.MovingAverageOptimizer)
def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: True
    moving_average_decay: 0.2
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer, _ = optimizer_builder.build(optimizer_proto)
  self.assertIsInstance(optimizer, contrib_opt.MovingAverageOptimizer)
  # TODO(rathodv): Find a way to not depend on the private members.
  self.assertAlmostEqual(optimizer._ema._decay, 0.2)
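
# The asserted _decay feeds the standard exponential-moving-average update,
#   shadow = decay * shadow + (1 - decay) * value,
# so a low decay like 0.2 makes the shadow track the raw value quickly.
# A plain-Python numeric sketch of the formula (not the builder's code):
def _ema_update(shadow, value, decay=0.2):
  return decay * shadow + (1.0 - decay) * value

_shadow = 0.0
for _ in range(3):
  _shadow = _ema_update(_shadow, 1.0)   # 0.8 -> 0.96 -> 0.992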
def testBuildRMSPropOptimizer(self):
  optimizer_text_proto = """
    rms_prop_optimizer: {
      learning_rate: {
        exponential_decay_learning_rate {
          initial_learning_rate: 0.004
          decay_steps: 800720
          decay_factor: 0.95
        }
      }
      momentum_optimizer_value: 0.9
      decay: 0.9
      epsilon: 1.0
    }
    use_moving_average: false
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer, _ = optimizer_builder.build(optimizer_proto)
  self.assertIsInstance(optimizer, tf.train.RMSPropOptimizer)
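
# The exponential_decay_learning_rate block corresponds to TF's standard
# schedule, lr = initial * decay_factor ** (global_step / decay_steps).
# Sketch of the equivalent call; staircase=True is an assumption about the
# builder's default rather than something this test verifies.
_global_step = tf.train.get_or_create_global_step()
_decayed_lr = tf.train.exponential_decay(
    learning_rate=0.004,      # initial_learning_rate above
    global_step=_global_step,
    decay_steps=800720,
    decay_rate=0.95,          # decay_factor above
    staircase=True)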
# Older-API variant of the non-default-decay test, again with global_summaries.
def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
  optimizer_text_proto = """
    adam_optimizer: {
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.002
        }
      }
    }
    use_moving_average: True
    moving_average_decay: 0.2
  """
  global_summaries = set([])
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  optimizer = optimizer_builder.build(optimizer_proto, global_summaries)
  self.assertIsInstance(optimizer, tf.contrib.opt.MovingAverageOptimizer)
  # TODO: Find a way to not depend on the private members.
  self.assertAlmostEqual(optimizer._ema._decay, 0.2)