# Example 1
 def test_multiple_hparams(self):
   """Identity activation settings pass hparam values through unchanged."""
   hparam_names = ('beta1', 'learning_rate')
   # Settings that make the activation a no-op: linear fn, unit scale,
   # and no ceiling/floor/clipping.
   identity_settings = {
       'learning_rate_scalar': 1,
       'activation_fn': 'linear',
       'activation_ceiling': None,
       'activation_floor': None,
       'clip_min': None,
       'clip_max': None,
   }
   opt_dict = {
       'hp-' + name: optim.GradientDescent(
           learning_rate=1).create(np.array([0.0, 0.1, 0.2, 0.9]))
       for name in hparam_names
   }
   gv_dict = {name: dict(identity_settings) for name in hparam_names}
   out = guided_parameters.get_activated_hparams(opt_dict, gv_dict)
   for name in hparam_names:
     self.assertListEqual(list(out['hp-' + name]), [0.0, 0.1, 0.2, 0.9])
# Example 2
 def test_empty(self):
   """With no optimizers and no settings, every known hparam maps to None."""
   out = guided_parameters.get_activated_hparams({}, {})
   expected = {
       'hp-' + name: None for name in (
           'beta1',
           'decay_rate',
           'eps',
           'learning_rate',
           'weight_decay',
           'label_smoothing',
       )
   }
   self.assertEqual(out, expected)
# Example 3
 def test_ignores_model(self):
   """A 'model' entry in the optimizer dict is skipped entirely."""
   # Only the model optimizer is present; no guided-variable settings.
   opt_dict = {
       'model': optim.GradientDescent(
           learning_rate=1).create(np.array([0.0, 0.1, 0.2, 0.9])),
   }
   out = guided_parameters.get_activated_hparams(opt_dict, {})
   expected = {
       'hp-' + name: None for name in (
           'beta1',
           'decay_rate',
           'eps',
           'learning_rate',
           'weight_decay',
           'label_smoothing',
       )
   }
   self.assertEqual(out, expected)