def set_up_training(self, state: TrainingState, training_data: BatchIterator):
    if cuda.CUDA_ENABLED:
        state.model.cuda()
    state.scheduler.prepare(training_data, self.config.epochs)
    if cuda.DISTRIBUTED_WORLD_SIZE > 1:
        device_id = torch.cuda.current_device()
        state.model = DistributedModel(
            module=state.model,
            device_ids=[device_id],
            output_device=device_id,
            broadcast_buffers=False,
            find_unused_parameters=state.model.find_unused_parameters,
            process_group=distributed._round_robin_process_group,
        )
        state.model.register_comm_hook(
            distributed._round_robin_process_group, fp16_compress_hook
        )
    state.start_time = time.time()
    if self.config.num_batches_per_epoch:
        # Set the training_data iterator to cycle, so it will never run out,
        # but rather after reaching the end will loop back to the beginning.
        training_data = cycle(training_data)
    return training_data


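# Why the `cycle(...)` call matters: with a cycling iterator the epoch boundary
# is defined by config.num_batches_per_epoch rather than by iterator exhaustion.
# A minimal sketch of that behavior, assuming `cycle` is equivalent to
# itertools.cycle; the names below are illustrative, not part of the trainer.
from itertools import cycle, islice

batches = ["b0", "b1", "b2"]   # stand-in for a small BatchIterator
num_batches_per_epoch = 5      # stand-in for config.num_batches_per_epoch

endless = cycle(batches)       # never raises StopIteration
one_epoch = list(islice(endless, num_batches_per_epoch))
print(one_epoch)               # ['b0', 'b1', 'b2', 'b0', 'b1']

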
def set_up_training(self, state: TrainingState, training_data: BatchIterator):
    if cuda.CUDA_ENABLED:
        state.model.cuda()
    state.scheduler.prepare(training_data, self.config.epochs)
    if cuda.DISTRIBUTED_WORLD_SIZE > 1:
        device_id = torch.cuda.current_device()
        state.model = DistributedModel(
            module=state.model,
            device_ids=[device_id],
            output_device=device_id,
            broadcast_buffers=False,
            find_unused_parameters=state.model.find_unused_parameters,
        )
    state.start_time = time.time()


def train(
    self,
    train_iter: BatchIterator,
    eval_iter: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    optimizer: torch.optim.Optimizer,
    scheduler=None,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and eval a model; the model state will be modified. This function
    iterates over the epochs specified in the config, and for each epoch:

    1. Train the model using training data, aggregate and report training results
    2. Adjust the learning rate if a scheduler is specified
    3. Evaluate the model using evaluation data
    4. Calculate metrics based on evaluation results and select the best model

    Args:
        train_iter (BatchIterator): batch iterator of training data
        eval_iter (BatchIterator): batch iterator of evaluation data
        model (Model): model to be trained
        metric_reporter (MetricReporter): computes metrics based on training
            output and reports results to the console, a file, etc.
        train_config (PyTextConfig): training config
        optimizer (torch.optim.Optimizer): torch optimizer to be used
        scheduler (Optional[torch.optim.lr_scheduler]): learning rate scheduler,
            default is None
        rank (int): only used in distributed training, the rank of the current
            training thread; evaluation will only be done in rank 0

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    timer = time_utils.StageTimer()
    world_size = 1
    if cuda_utils.CUDA_ENABLED:
        model = model.cuda()
        world_size = cuda_utils.DISTRIBUTED_WORLD_SIZE
        if world_size > 1:
            device_id = torch.cuda.current_device()
            model = DistributedModel(
                module=model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
            )
            timer.add_stage(stage="init_distributed_model")

    best_metric = None
    last_best_epoch = 0
    scheduler = self._prepare_scheduler(train_iter, scheduler)
    timer.add_stage(stage="pre_training")

    def training_pre_batch_callback():
        if world_size > 1:
            # Replace optimizer.zero_grad() here to work with DDP in cases
            # where some parameters don't receive grads at each step.
            # loss.backward will set grad for params in the computation graph;
            # we can thus follow which params are left out and call .backward
            # on them manually.
            for p in model.parameters():
                if p.grad is not None:
                    p.grad.detach_()
                    p.grad = None
        else:
            optimizer.zero_grad()

    def training_backprop(loss, timer):
        loss.backward()
        if world_size > 1:
            # DDP fix when some parameters don't receive grads
            for p in model.parameters():
                if p.requires_grad and p.grad is None:
                    p.backward(torch.zeros_like(p.data))
        timer.add_stage("backward")

        if scheduler:
            scheduler.step_batch()

        if self.config.max_clip_norm is not None:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), self.config.max_clip_norm
            )
        else:
            grad_norm = None

        optimizer.step()
        timer.add_stage("update_grads")
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    time_start = time.time()
    for epoch in range(1, self.config.epochs + 1):
        if self.config.target_time_limit_seconds > 0 and epoch > 1:
            time_elapsed = time.time() - time_start
            mean_epoch_time = time_elapsed / float(epoch - 1)
            expected_next_epoch_time = time_elapsed + mean_epoch_time
            if expected_next_epoch_time > self.config.target_time_limit_seconds:
                print(
                    f"Training stopped after {epoch - 1} epochs and "
                    f"{int(time_elapsed)} seconds, due to the target max training "
                    f"time of {self.config.target_time_limit_seconds} seconds."
                )
                break

        print(f"Rank {rank} worker: Starting epoch #{epoch}")
        model.train()
        lrs = (str(lr) for lr in learning_rates(optimizer))
        print(f"Learning rate(s): {', '.join(lrs)}")
        self._run_epoch(
            Stage.TRAIN,
            epoch,
            train_iter,
            model,
            metric_reporter,
            pre_batch=training_pre_batch_callback,
            backprop=training_backprop,
            rank=rank,
        )
        timer.add_stage(stage="epoch_train")

        model.eval(Stage.EVAL)
        with torch.no_grad():
            eval_metric = self._run_epoch(
                Stage.EVAL, epoch, eval_iter, model, metric_reporter, rank=rank
            )
        timer.add_stage(stage="epoch_eval")

        # Step the learning rate scheduler(s)
        if scheduler:
            assert eval_metric is not None
            scheduler.step(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=epoch,
            )

        # Choose the best model.
        if metric_reporter.compare_metric(eval_metric, best_metric):
            last_best_epoch = epoch
            best_metric = eval_metric
            # Only the rank 0 trainer saves modules.
            if train_config.save_module_checkpoints and rank == 0:
                model.save_modules(
                    base_path=train_config.modules_save_dir, suffix=f"-ep{epoch}"
                )
            if rank == 0:
                print(f"Rank {rank} worker: Found a better model!")
                model_state = model.state_dict()
                # Save to CPU to avoid keeping multiple model copies in GPU memory.
                if cuda_utils.CUDA_ENABLED:
                    for key, state in model_state.items():
                        model_state[key] = state.cpu()
                best_model_state = model_state
        timer.add_stage(stage="epoch_save/load_module")

        if self.config.early_stop_after > 0 and (
            epoch - last_best_epoch == self.config.early_stop_after
        ):
            print(
                f"Rank {rank} worker: Eval metric hasn't changed for "
                f"{self.config.early_stop_after} epochs. Stopping now."
            )
            break
        sys.stdout.flush()

    if rank == 0:
        if cuda_utils.CUDA_ENABLED:
            for key, state in best_model_state.items():
                best_model_state[key] = state.cuda()
        model.load_state_dict(best_model_state)

    timer.report("Trainer train timer")
    return model, best_metric


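# The `learning_rates` helper used to print the current learning rate(s) is not
# shown above. A hypothetical sketch of what it could look like, yielding the
# `lr` of each optimizer param group; this is an assumption, not the library's
# actual implementation.
from typing import Iterable

import torch


def learning_rates(optimizer: torch.optim.Optimizer) -> Iterable[float]:
    # Yield the current learning rate of every param group so callers can
    # join them into a printable string.
    for param_group in optimizer.param_groups:
        yield param_group["lr"]

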
def train(
    self,
    train_iter: BatchIterator,
    eval_iter: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    optimizers: List[torch.optim.Optimizer],
    scheduler=None,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and eval a model; the model state will be modified. This function
    iterates over the epochs specified in the config, and for each epoch:

    1. Train the model using training data, aggregate and report training results
    2. Adjust the learning rate if a scheduler is specified
    3. Evaluate the model using evaluation data
    4. Calculate metrics based on evaluation results and select the best model

    Args:
        train_iter (BatchIterator): batch iterator of training data
        eval_iter (BatchIterator): batch iterator of evaluation data
        model (Model): model to be trained
        metric_reporter (MetricReporter): computes metrics based on training
            output and reports results to the console, a file, etc.
        train_config (PyTextConfig): training config
        optimizers (List[torch.optim.Optimizer]): a list of torch optimizers;
            in most cases it contains only one optimizer
        scheduler (Optional[torch.optim.lr_scheduler]): learning rate scheduler,
            default is None
        rank (int): only used in distributed training, the rank of the current
            training thread; evaluation will only be done in rank 0

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    if cuda_utils.CUDA_ENABLED:
        model = model.cuda()
        if cuda_utils.DISTRIBUTED_WORLD_SIZE > 1:
            device_id = torch.cuda.current_device()
            model = DistributedModel(
                module=model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
            )

    best_metric = None
    last_best_epoch = 0
    best_model_state = None
    scheduler = self._prepare_scheduler(train_iter, scheduler)

    def training_pre_batch_callback():
        optimizer_zero_grad(optimizers)

    def training_backprop(loss):
        loss.backward()
        if scheduler:
            scheduler.step_batch()

        if self.config.max_clip_norm is not None:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), self.config.max_clip_norm
            )
        else:
            grad_norm = None

        optimizer_step(optimizers)
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    for epoch in range(1, self.config.epochs + 1):
        print(f"Rank {rank} worker: Starting epoch #{epoch}")
        model.train()
        lrs = (str(lr) for lr in learning_rates(optimizers))
        print(f"Learning rate(s): {', '.join(lrs)}")
        self._run_epoch(
            Stage.TRAIN,
            epoch,
            train_iter,
            model,
            metric_reporter,
            pre_batch=training_pre_batch_callback,
            backprop=training_backprop,
            rank=rank,
        )
        model.eval(Stage.EVAL)
        eval_metric = self._run_epoch(
            Stage.EVAL, epoch, eval_iter, model, metric_reporter, rank=rank
        )

        # Step the learning rate scheduler(s)
        if scheduler:
            assert eval_metric is not None
            scheduler.step(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=epoch,
            )

        # Choose the best model.
        if metric_reporter.compare_metric(eval_metric, best_metric):
            print(
                f"Rank {rank} worker: Found a better model! "
                f"Saving the model state."
            )
            last_best_epoch = epoch
            best_metric = eval_metric
            # Only the rank 0 trainer saves modules.
            if train_config.save_module_checkpoints and rank == 0:
                model.save_modules(
                    base_path=train_config.modules_save_dir, suffix=f"-ep{epoch}"
                )
            best_model_state = copy.deepcopy(model.state_dict())

        if self.config.early_stop_after > 0 and (
            epoch - last_best_epoch == self.config.early_stop_after
        ):
            print(
                f"Rank {rank} worker: Eval metric hasn't changed for "
                f"{self.config.early_stop_after} epochs. Stopping now."
            )
            break
        sys.stdout.flush()

    model.load_state_dict(best_model_state)
    return model, best_metric


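# The `optimizer_zero_grad` and `optimizer_step` helpers used by the callbacks
# above are not shown. A plausible sketch, assuming they simply fan the call
# out over the list of optimizers; the real helpers may differ.
from typing import List

import torch


def optimizer_zero_grad(optimizers: List[torch.optim.Optimizer]) -> None:
    # Clear accumulated gradients on every optimizer before the next batch.
    for op in optimizers:
        op.zero_grad()


def optimizer_step(optimizers: List[torch.optim.Optimizer]) -> None:
    # Apply the parameter update on every optimizer after backprop.
    for op in optimizers:
        op.step()

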
def train(
    self,
    train_iter: BatchIterator,
    eval_iter: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    optimizers: List[torch.optim.Optimizer],
    scheduler=None,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    if cuda_utils.CUDA_ENABLED:
        model = model.cuda()
        if cuda_utils.DISTRIBUTED_WORLD_SIZE > 1:
            device_id = torch.cuda.current_device()
            model = DistributedModel(
                module=model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
            )

    best_metric = None
    last_best_epoch = 0
    best_model_path = None
    scheduler = self._prepare_scheduler(train_iter, scheduler)

    def training_pre_batch_callback():
        optimizer_zero_grad(optimizers)

    def training_backprop(loss):
        loss.backward()
        if scheduler:
            scheduler.step_batch()

        if self.config.max_clip_norm is not None:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), self.config.max_clip_norm
            )
        else:
            grad_norm = None

        optimizer_step(optimizers)
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    len_sched_ix = 0

    # Used since we need the infinite iterator (only created and called once)
    def batch_generator_for_epoch(it):
        n = len(it)
        while n > 0:
            yield next(it)
            n -= 1

    for epoch in range(self.config.start_epoch, self.config.epochs + 1):
        # Set the dialogue length in the fields, to be used by the postprocessor
        while (
            self.config.length_schedule_per_epoch
            and len_sched_ix < len(self.config.length_schedule_per_epoch)
            and epoch >= self.config.length_schedule_per_epoch[len_sched_ix][0]
        ):
            train_iter.max_n_turns = self.config.length_schedule_per_epoch[
                len_sched_ix
            ][1]
            eval_iter.max_n_turns = self.config.length_schedule_per_epoch[
                len_sched_ix
            ][1]
            len_sched_ix += 1

        LOG.info(f"\nRank {rank} worker: Starting epoch #{epoch}")
        model.train()
        lrs = (str(lr) for lr in learning_rates(optimizers))
        LOG.info(f"Learning rate(s): {', '.join(lrs)}")
        self._run_epoch(
            Stage.TRAIN,
            epoch,
            batch_generator_for_epoch(train_iter),
            model,
            metric_reporter,
            pre_batch=training_pre_batch_callback,
            backprop=training_backprop,
            rank=rank,
        )
        model.eval(Stage.EVAL)
        with torch.no_grad():
            eval_metric = self._run_epoch(
                Stage.EVAL,
                epoch,
                batch_generator_for_epoch(eval_iter),
                model,
                metric_reporter,
                rank=rank,
            )

        # Step the learning rate scheduler(s)
        if scheduler:
            assert eval_metric is not None
            scheduler.step(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=epoch,
            )

        # Choose the best model.
        if metric_reporter.compare_metric(eval_metric, best_metric):
            LOG.info(
                f"Rank {rank} worker: Found a better model! "
                f"Saving the model state for epoch #{epoch}."
            )
            last_best_epoch = epoch
            best_metric = eval_metric
            # Only the rank 0 trainer saves modules.
            if train_config.save_module_checkpoints and rank == 0:
                best_model_path = os.path.join(
                    train_config.modules_save_dir, "best_model"
                )
                # PyText only ever returns a single optimizer in this list
                (optimizer,) = optimizers
                torch.save(
                    ModelState(
                        epoch=epoch,
                        parameters=model.state_dict(),
                        optimizer=optimizer.state_dict(),
                    ),
                    best_model_path,
                )

        if self.config.early_stop_after > 0 and (
            epoch - last_best_epoch == self.config.early_stop_after
        ):
            LOG.info(
                f"Rank {rank} worker: Eval metric hasn't changed for "
                f"{self.config.early_stop_after} epochs. Stopping now."
            )
            break
        sys.stdout.flush()

    train_iter.close()
    eval_iter.close()
    model.load_state_dict(torch.load(best_model_path).parameters)
    return model, best_metric


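# The checkpoint above is written as a `ModelState` with `epoch`, `parameters`,
# and `optimizer` fields and read back via `.parameters`. A container consistent
# with that usage is sketched below; this is an assumption, not necessarily the
# actual class definition.
from typing import Any, Dict, NamedTuple


class ModelState(NamedTuple):
    epoch: int
    parameters: Dict[str, Any]   # model.state_dict()
    optimizer: Dict[str, Any]    # optimizer.state_dict()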