Example #1
    def _configure_basic_optimizer(self, model_parameters):
        optimizer_parameters = self.optimizer_params()
        # Under FP16, zero out any 'max_grad_norm' so the fused optimizer does not
        # clip gradients itself; DeepSpeed applies its own gradient clipping.
        if self.fp16_enabled() and 'max_grad_norm' in optimizer_parameters:
            optimizer_parameters['max_grad_norm'] = 0.0
        if self.optimizer_name() == ADAM_OPTIMIZER:
            # FusedAdam and FusedLamb are assumed to be imported elsewhere in the module.
            optimizer = FusedAdam(model_parameters, **optimizer_parameters)
        elif self.optimizer_name() == LAMB_OPTIMIZER:
            optimizer = FusedLamb(model_parameters, **optimizer_parameters)
        else:
            # Fall back to the torch.optim class matching the configured name.
            torch_optimizer = getattr(torch.optim, self.optimizer_name())
            optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
        return optimizer
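
In this variant the engine quietly zeroes out any 'max_grad_norm' found in the optimizer parameters whenever FP16 is enabled, leaving gradient clipping to DeepSpeed itself. A minimal sketch of the kind of config this method consumes, assuming optimizer_params() returns the "params" section of the DeepSpeed config and optimizer_name() its lower-cased "type" (all values are placeholders):

    # Hypothetical config excerpt for the FP16 + Adam path above.
    ds_config = {
        "train_batch_size": 8,
        "fp16": {"enabled": True},
        "optimizer": {
            "type": "Adam",            # matched against ADAM_OPTIMIZER
            "params": {
                "lr": 1e-4,
                "max_grad_norm": 1.0,  # zeroed out above because FP16 is enabled
            },
        },
    }
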
Example #2
    def _configure_basic_optimizer(self, model_parameters):
        optimizer_parameters = self.optimizer_params()
        # 'max_grad_norm' is no longer accepted as an optimizer parameter; gradient
        # clipping must go through DeepSpeed's own 'gradient_clipping' setting.
        if 'max_grad_norm' in optimizer_parameters:
            raise ValueError(
                "'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details"
            )
        if self.optimizer_name() == ADAM_OPTIMIZER:
            # Lazy import so apex is only required when the fused Adam is actually requested.
            from apex.optimizers.fused_adam import FusedAdam
            optimizer = FusedAdam(model_parameters, **optimizer_parameters)
        elif self.optimizer_name() == LAMB_OPTIMIZER:
            # FusedLamb is assumed to be imported elsewhere in the module.
            optimizer = FusedLamb(model_parameters, **optimizer_parameters)
        else:
            # Fall back to the torch.optim class matching the configured name.
            torch_optimizer = getattr(torch.optim, self.optimizer_name())
            optimizer = torch_optimizer(model_parameters, **optimizer_parameters)
        return optimizer
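
This later revision rejects 'max_grad_norm' outright and points users at the top-level 'gradient_clipping' entry of the DeepSpeed config instead. A minimal usage sketch, assuming a DeepSpeed release whose deepspeed.initialize accepts a config dict directly (model and hyperparameters are placeholders):

    import torch
    import deepspeed

    model = torch.nn.Linear(16, 16)  # placeholder model

    ds_config = {
        "train_batch_size": 8,
        "gradient_clipping": 1.0,      # replaces the old per-optimizer max_grad_norm
        "optimizer": {
            "type": "Adam",            # selects the fused Adam path above
            "params": {"lr": 1e-4},    # adding "max_grad_norm" here would raise the ValueError
        },
    }

    # deepspeed.initialize builds the engine, which in turn configures the optimizer
    # through a method like _configure_basic_optimizer.
    engine, optimizer, _, _ = deepspeed.initialize(
        model=model,
        model_parameters=model.parameters(),
        config=ds_config,
    )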