def get_config(self):
   serialized_optimizer = optimizers.serialize(self._optimizer)
   serialized_loss_scale = keras_loss_scale_module.serialize(self._loss_scale)
   return {
       'optimizer': serialized_optimizer,
       'loss_scale': serialized_loss_scale,
   }
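The dict returned above is what the matching from_config consumes when the wrapper is deserialized. A minimal sketch of that classmethod, assuming the constructor accepts optimizer and loss_scale keyword arguments and that optimizers and keras_loss_scale_module expose the usual deserialize counterparts:

@classmethod
def from_config(cls, config, custom_objects=None):
    # Sketch only: rebuild the nested objects, then call the constructor.
    config = config.copy()
    config['optimizer'] = optimizers.deserialize(
        config['optimizer'], custom_objects=custom_objects)
    config['loss_scale'] = keras_loss_scale_module.deserialize(
        config['loss_scale'], custom_objects=custom_objects)
    return cls(**config)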
Example 2
    def testSerializationWithBuiltInOptimizer(self, use_v1):
        opt = gradient_descent.SGD(2., momentum=0.5)
        if use_v1:
            loss_scale = tf_loss_scale_module.DynamicLossScale(
                initial_loss_scale=2., increment_period=3.)
            opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)
        else:
            opt = loss_scale_optimizer.LossScaleOptimizer(
                opt, initial_scale=2., dynamic_growth_steps=3.)
        config = optimizers.serialize(opt)
        opt = optimizers.deserialize(config)
        # Force hyperparameters to be created
        opt.lr  # pylint: disable=pointless-statement
        self.evaluate(variables.global_variables_initializer())

        self.assertEqual(self.evaluate(opt.lr), 2.)
        self.assertEqual(self.evaluate(opt._optimizer.momentum), 0.5)
        self.assertEqual(self.evaluate(opt.loss_scale), 2.)
        self.assertEqual(opt.dynamic_growth_steps, 3.)
        self.assertTrue(opt.dynamic)
        # Deserializing a LossScaleOptimizer always results in a V2
        # LossScaleOptimizer, even if serialized with a LossScaleOptimizerV1.
        self.assertAllEqual(type(opt), loss_scale_optimizer.LossScaleOptimizer)

        # Ensure the optimizer can be used
        var = variables.Variable([5.0])
        run_op = self._run_fn_with_grad_check(
            distribution_strategy_context.get_strategy(), var, opt, 2)()
        self.evaluate(variables.global_variables_initializer())
        self._run_if_in_graph_mode(run_op)
        self.assertEqual(self.evaluate(var), [3.])
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
Example 3
 def get_config(self):
     config = {
         'optimizer': optimizers.serialize(self._optimizer),
         'lr_multipliers': self._lr_multipliers
     }
     base_config = super(LearningRateMultiplier, self).get_config()
     return dict(list(base_config.items()) + list(config.items()))
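Because the base optimizer config is merged with the wrapper-specific entries, the wrapper can round-trip through the standard Keras serialization helpers; the only extra step is passing the custom class via custom_objects. A small usage sketch, assuming wrapper is an existing LearningRateMultiplier instance and that the class defines a matching from_config:

config = optimizers.serialize(wrapper)
restored = optimizers.deserialize(
    config, custom_objects={'LearningRateMultiplier': LearningRateMultiplier})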
Example 4
  def testSerializationWithCustomOptimizer(self):
    class MySGD(gradient_descent.SGD):

      def __init__(self, *args, **kwargs):
        super(MySGD, self).__init__(*args, **kwargs)
        self.my_attribute = 123

    opt = MySGD(2., momentum=0.5)
    loss_scale = loss_scale_module.DynamicLossScale(
        initial_loss_scale=2., increment_period=3.,
        multiplier=4.)
    opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
    config = optimizers.serialize(opt)
    custom_objects = {'MySGD': MySGD}
    opt = optimizers.deserialize(config, custom_objects=custom_objects)
    # Force hyperparameters to be created
    opt.lr  # pylint: disable=pointless-statement
    self.evaluate(variables.global_variables_initializer())

    self.assertEqual(self.evaluate(opt.lr), 2.)
    self.assertEqual(self.evaluate(opt._optimizer.momentum), 0.5)
    self.assertEqual(self.evaluate(opt.loss_scale()), 2.)
    self.assertEqual(opt.loss_scale.increment_period, 3.)
    self.assertEqual(opt.loss_scale.multiplier, 4.)
    self.assertEqual(opt._optimizer.my_attribute, 123)
Example 5
 def valid_optimizer(optimizer):
     # Normalize an optimizer spec into its serialized (config dict) form.
     if optimizer and isinstance(optimizer, dict):
         # A Keras-style config dict: rebuild the optimizer from its class
         # name and config, then re-serialize it.
         class_name = optimizer.get('class_name')
         optimizer = get(class_name).from_config(optimizer.get('config', {}))
         optimizer = serialize(optimizer)
     elif isinstance(optimizer, DataFrame):
         # A DataFrame from a previous pipeline step: the serialized optimizer
         # is stored as JSON in the 'optimizer' column of its first row.
         optimizer = json.loads(optimizer.first().optimizer)
     return optimizer
Example 6
 def get_config(self):
   serialized_optimizer = optimizers.serialize(self._optimizer)
   return {
       'optimizer': serialized_optimizer,
       'dynamic': self.dynamic,
       'initial_scale': self.initial_scale,
       'dynamic_growth_steps': self.dynamic_growth_steps,
   }
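The keys above map directly onto the V2 constructor arguments, so the config round-trips through the standard serialization helpers. A minimal, self-contained sketch using the public tf.keras API, assuming it mirrors the internal modules exercised by these tests:

import tensorflow as tf

opt = tf.keras.mixed_precision.LossScaleOptimizer(
    tf.keras.optimizers.SGD(2., momentum=0.5),
    dynamic=True, initial_scale=2., dynamic_growth_steps=3)
config = tf.keras.optimizers.serialize(opt)        # produces a dict like the one above
restored = tf.keras.optimizers.deserialize(config)
assert restored.initial_scale == 2.
assert restored.dynamic_growth_steps == 3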
Example 7
    def testSerializationWithBuiltInOptimizer(self):
        opt = gradient_descent.SGD(2., momentum=0.5)
        loss_scale = loss_scale_module.DynamicLossScale(initial_loss_scale=2.,
                                                        increment_period=3.,
                                                        multiplier=4.)
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
        config = optimizers.serialize(opt)
        opt = optimizers.deserialize(config)
        # Force hyperparameters to be created
        opt.lr  # pylint: disable=pointless-statement
        self.evaluate(variables.global_variables_initializer())

        self.assertEqual(self.evaluate(opt.lr), 2.)
        self.assertEqual(self.evaluate(opt._optimizer.momentum), 0.5)
        self.assertEqual(self.evaluate(opt.loss_scale()), 2.)
        self.assertEqual(opt.loss_scale.increment_period, 3.)
        self.assertEqual(opt.loss_scale.multiplier, 4.)
Example 8
 def __add_compile_layer(self, layer):
     # Serialize the optimizer/compile layer to JSON and persist it in the
     # 'optimizer' column (creating the column if it does not exist yet).
     return self._add_or_create_column('optimizer',
                                       json.dumps(serialize(layer)))