def test_init(self):
    """Constructing an Optimizer wrapper must not raise and must
    default ``max_grad_norm`` to 0 (i.e. no gradient clipping)."""
    weights = [torch.nn.Parameter(torch.randn(2, 3, 4))]
    try:
        wrapped = Optimizer(torch.optim.Adam(weights))
    except BaseException:
        self.fail("__init__ failed.")
    self.assertEqual(wrapped.max_grad_norm, 0)
def test_update(self):
    """After two update() calls with a StepLR(step_size=1, gamma=0.1)
    scheduler attached, the learning rate must have decayed from 1 to 0.1."""
    weights = [torch.nn.Parameter(torch.randn(2, 3, 4))]
    wrapper = Optimizer(torch.optim.Adam(weights, lr=1), max_grad_norm=5)
    lr_scheduler = StepLR(wrapper.optimizer, 1, gamma=0.1)
    wrapper.set_scheduler(lr_scheduler)
    # First update establishes epoch 0; second advances to epoch 1,
    # which triggers one decay step of the scheduler.
    wrapper.update(10, 0)
    wrapper.update(10, 1)
    self.assertEqual(wrapper.optimizer.param_groups[0]['lr'], 0.1)
def test_step(self, mock_clip_grad_norm):
    """step() must clip gradients exactly once when max_grad_norm > 0.

    ``mock_clip_grad_norm`` is injected by the mock.patch decorator on
    this test (the patch target is outside this block).
    """
    weights = [torch.nn.Parameter(torch.randn(2, 3, 4))]
    wrapper = Optimizer(torch.optim.Adam(weights), max_grad_norm=5)
    wrapper.step()
    mock_clip_grad_norm.assert_called_once()
def train(self, model, data, dev_data, num_epochs=5, resume_training=False,
          monitor_data=None, optimizer=None, teacher_forcing_ratio=0,
          custom_callbacks=None, learning_rate=0.001, checkpoint_path=None,
          top_k=5, losses=None, loss_weights=None, metrics=None,
          random_seed=None, checkpoint_every=100, print_every=100):
    """ Run training for a given model.

    Args:
        model (machine.models): model to run training on, if `resume_training=True`,
            it would be overwritten by the model loaded from the latest checkpoint.
        data (torchtext.data.Iterator): torchtext iterator object to train on
        num_epochs (int, optional): number of epochs to run (default 5)
        resume_training (bool, optional): resume training with the latest
            checkpoint up until the number of epochs (default False)
        dev_data (torchtext.data.Iterator): dev/validation set iterator
            Note: must not pass in the train iterator here as this gets
            evaluated during training (in between batches). If you want to
            evaluate on the full train during training then make two
            iterators and pass the second one here
        monitor_data (list of torchtext.data.Iterator, optional): list of
            iterators to test on (default None)
            Note: must not pass in the train iterator here as this gets
            evaluated during training (in between batches). If you want to
            evaluate on the full train during training then make two
            iterators and pass the second one here
        optimizer (machine.optim.Optimizer, optional): optimizer for training
            (default: Optimizer(pytorch.optim.Adam, max_grad_norm=5))
        teacher_forcing_ratio (float, optional): teaching forcing ratio (default 0)
        custom_callbacks (list, optional): list of custom call backs
            (see utils.callbacks.callback for base class)
        learning_rate (float, optional): learning rate used by the optimizer
            (default 0.001)
        checkpoint_path (str, optional): path to load checkpoint from in case
            training should be resumed
        top_k (int): how many models should be stored during training
        losses (list, optional): list of machine.loss.Loss objects for
            training (default: [machine.loss.NLLLoss])
        metrics (list, optional): list of machine.metric.metric objects to be
            computed during evaluation
        checkpoint_every (int, optional): number of epochs to checkpoint
            after, (default: 100)
        print_every (int, optional): number of iterations to print after,
            (default: 100)

    Returns:
        model (machine.models): trained model.
    """
    # Fix: the previous signature used mutable defaults ({}, [], and a
    # single shared NLLLoss() instance created at import time) that were
    # shared across every call to train(). Create fresh objects per call.
    if monitor_data is None:
        monitor_data = {}
    if custom_callbacks is None:
        custom_callbacks = []
    if losses is None:
        losses = [NLLLoss()]
    if metrics is None:
        metrics = []

    self.set_local_parameters(random_seed, losses, metrics, loss_weights,
                              checkpoint_every, print_every)
    # If training is set to resume
    if resume_training:
        resume_checkpoint = Checkpoint.load(checkpoint_path)
        model = resume_checkpoint.model
        self.model = model
        self.optimizer = resume_checkpoint.optimizer

        # A walk around to set optimizing parameters properly:
        # rebuild the optimizer of the same class over the (re)loaded
        # model's parameters, reusing the checkpointed hyper-parameters.
        resume_optim = self.optimizer.optimizer
        defaults = resume_optim.param_groups[0]
        defaults.pop('params', None)
        defaults.pop('initial_lr', None)
        self.optimizer.optimizer = resume_optim.__class__(
            self.model.parameters(), **defaults)

        start_epoch = resume_checkpoint.epoch
        step = resume_checkpoint.step
    else:
        start_epoch = 1
        step = 0
        self.model = model

        def get_optim(optim_name):
            # Map an optimizer name to its torch.optim class; None
            # selects Adam as the default.
            optims = {'adam': optim.Adam, 'adagrad': optim.Adagrad,
                      'adadelta': optim.Adadelta, 'adamax': optim.Adamax,
                      'rmsprop': optim.RMSprop, 'sgd': optim.SGD,
                      None: optim.Adam}
            return optims[optim_name]

        self.optimizer = Optimizer(
            get_optim(optimizer)(self.model.parameters(), lr=learning_rate),
            max_grad_norm=5)

    self.logger.info("Optimizer: %s, Scheduler: %s" %
                     (self.optimizer.optimizer, self.optimizer.scheduler))

    callbacks = CallbackContainer(
        self,
        [Logger(), ModelCheckpoint(top_k=top_k), History()] + custom_callbacks)

    logs = self._train_epoches(
        data, num_epochs, start_epoch, step,
        dev_data=dev_data, monitor_data=monitor_data, callbacks=callbacks,
        teacher_forcing_ratio=teacher_forcing_ratio)

    return self.model, logs
def train(self, model, data, num_epochs=5, resume=False, dev_data=None,
          monitor_data=None, optimizer=None, teacher_forcing_ratio=0,
          learning_rate=0.001, checkpoint_path=None, top_k=5):
    """ Run training for a given model.

    Args:
        model (machine.models): model to run training on, if `resume=True`,
            it would be overwritten by the model loaded from the latest
            checkpoint.
        data (machine.dataset.dataset.Dataset): dataset object to train on
        num_epochs (int, optional): number of epochs to run (default 5)
        resume (bool, optional): resume training with the latest checkpoint,
            (default False)
        dev_data (machine.dataset.dataset.Dataset, optional): dev Dataset
            (default None)
        optimizer (machine.optim.Optimizer, optional): optimizer for training
            (default: Optimizer(pytorch.optim.Adam, max_grad_norm=5))
        teacher_forcing_ratio (float, optional): teaching forcing ratio
            (default 0)
        learning_rate (float, optional): learning rate used by the optimizer
            (default 0.001)
        checkpoint_path (str, optional): path to load checkpoint from in case
            training should be resumed
        top_k (int): how many models should be stored during training

    Returns:
        model (machine.models): trained model.
    """
    # Fix: `monitor_data` previously defaulted to a mutable {} shared
    # across all calls; create a fresh dict per call instead.
    if monitor_data is None:
        monitor_data = {}

    # If training is set to resume
    if resume:
        resume_checkpoint = Checkpoint.load(checkpoint_path)
        model = resume_checkpoint.model
        self.optimizer = resume_checkpoint.optimizer

        # A walk around to set optimizing parameters properly:
        # rebuild the optimizer of the same class over the (re)loaded
        # model's parameters, reusing the checkpointed hyper-parameters.
        resume_optim = self.optimizer.optimizer
        defaults = resume_optim.param_groups[0]
        defaults.pop('params', None)
        defaults.pop('initial_lr', None)
        self.optimizer.optimizer = resume_optim.__class__(
            model.parameters(), **defaults)

        start_epoch = resume_checkpoint.epoch
        step = resume_checkpoint.step
    else:
        start_epoch = 1
        step = 0

        def get_optim(optim_name):
            # Map an optimizer name to its torch.optim class; None
            # selects Adam as the default.
            optims = {
                'adam': optim.Adam, 'adagrad': optim.Adagrad,
                'adadelta': optim.Adadelta, 'adamax': optim.Adamax,
                'rmsprop': optim.RMSprop, 'sgd': optim.SGD,
                None: optim.Adam
            }
            return optims[optim_name]

        self.optimizer = Optimizer(
            get_optim(optimizer)(model.parameters(), lr=learning_rate),
            max_grad_norm=5)

    self.logger.info("Optimizer: %s, Scheduler: %s" %
                     (self.optimizer.optimizer, self.optimizer.scheduler))

    logs = self._train_epoches(
        data, model, num_epochs, start_epoch, step,
        dev_data=dev_data, monitor_data=monitor_data,
        teacher_forcing_ratio=teacher_forcing_ratio, top_k=top_k)

    return model, logs