def get_config(self):
  """Returns the serializable config of this loss-scale optimizer wrapper.

  The config contains the serialized inner optimizer and the serialized
  loss scale, suitable for reconstruction via `from_config`.
  """
  return {
      'optimizer': optimizers.serialize(self._optimizer),
      'loss_scale': keras_loss_scale_module.serialize(self._loss_scale),
  }
def get_config(self):
  """Returns the config dict of this policy."""
  if self._using_default_loss_scale:
    # Omit the loss scale when the default one is in use. This lets the
    # loss scale config format change later without breaking users who
    # rely on the default.
    return {'name': self.name}
  serialized_scale = keras_loss_scale_module.serialize(self.loss_scale)
  return {'name': self.name, 'loss_scale': serialized_scale}
def test_serialization(self):
  """Round-trips a DynamicLossScale through serialize/deserialize."""
  original = loss_scale_module.DynamicLossScale(
      initial_loss_scale=1, increment_period=2, multiplier=3)
  restored = loss_scale_module.deserialize(
      loss_scale_module.serialize(original))
  self.evaluate(variables.global_variables_initializer())
  # All constructor arguments must survive the round trip.
  self.assertEqual(self.evaluate(restored()), 1)
  self.assertEqual(restored.increment_period, 2)
  self.assertEqual(restored.multiplier, 3)
def test_serialization(self):
  """Checks that DynamicLossScale survives a serialize/deserialize cycle."""
  loss_scale = loss_scale_module.DynamicLossScale(
      initial_loss_scale=1,
      increment_period=2,
      multiplier=3)
  round_tripped = loss_scale_module.deserialize(
      loss_scale_module.serialize(loss_scale))
  loss_scale = round_tripped
  self.evaluate(variables.global_variables_initializer())
  # Verify every field made it through the round trip unchanged.
  self.assertEqual(self.evaluate(loss_scale()), 1)
  self.assertEqual(loss_scale.increment_period, 2)
  self.assertEqual(loss_scale.multiplier, 3)
def test_serialization(self):
  """Round-trips a fixed loss scale obtained from `get`."""
  original = loss_scale_module.get(123)
  restored = loss_scale_module.deserialize(
      loss_scale_module.serialize(original))
  # The restored loss scale must still evaluate to the same value.
  self.assertEqual(self.evaluate(restored()), 123.)
def test_serialization(self):
  """Checks serialize/deserialize preserves a fixed loss scale value."""
  loss_scale = loss_scale_module.get(123)
  serialized = loss_scale_module.serialize(loss_scale)
  loss_scale = loss_scale_module.deserialize(serialized)
  # Evaluating the deserialized scale must yield the original value.
  self.assertEqual(self.evaluate(loss_scale()), 123.)