def train_from_state(
    self,
    state: TrainingState,
    training_data: BatchIterator,
    eval_data: BatchIterator,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and eval a model from a given training state; ``state`` is
    modified in place.

    This function iterates the epochs specified in config, and for each
    epoch:

    1. Train model using training data, aggregate and report training results
    2. Adjust learning rate if scheduler is specified
    3. Evaluate model using evaluation data
    4. Calculate metrics based on evaluation results and select best model

    Args:
        state (TrainingState): contains stateful information needed to
            restore a training job
        training_data (BatchIterator): batch iterator of training data
        eval_data (BatchIterator): batch iterator of evaluation data
        metric_reporter (MetricReporter): computes metrics based on training
            output and reports results to console, file, etc.
        train_config (PyTextConfig): training config

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    # set_up_training may wrap/replace the iterator (its return value is
    # used below), so do not keep using the raw argument.
    training_data = self.set_up_training(state, training_data)
    model = state.model
    rank = state.rank
    trainable_params = sum(
        p.numel() for p in state.model.parameters() if p.requires_grad)
    print(f"Model :{model}")
    print(f"Num trainable parameters: {trainable_params}")

    while self.continue_training(state):
        state.epoch += 1
        # Incremented optimistically each epoch; NOTE(review): the reset on
        # improvement is not visible in this block — presumably done inside
        # update_best_model. Confirm against that method.
        state.epochs_since_last_improvement += 1
        lrs = learning_rates(state.optimizer)
        print(f"\nWorker {state.rank} starting epoch {state.epoch}")
        print(f"Learning rate(s): {', '.join(map(str, lrs))}")

        with timing.time("train epoch"):
            state.stage = Stage.TRAIN
            state.model.train()
            print(f"start training epoch {state.epoch}")
            epoch_data = training_data
            if self.config.num_batches_per_epoch:
                # We want to limit the number of batches in the epoch;
                # equivalent to epoch_data[:num_batches_per_epoch] for
                # iterators. In this case we set the training data iterator
                # to cycle earlier in the training process, so when it
                # reaches the end it will loop back to the beginning.
                epoch_data = itertools.islice(
                    epoch_data, self.config.num_batches_per_epoch)
            self.run_epoch(state, epoch_data, metric_reporter)

        # Without eval there is no metric to schedule/select on; skip the
        # rest of the loop body.
        if not self.config.do_eval:
            continue

        with timing.time("eval epoch"):
            state.stage = Stage.EVAL
            model.eval(Stage.EVAL)
            print(f"start evaluating epoch {state.epoch}")
            with torch.no_grad():
                eval_metric = self.run_epoch(state, eval_data, metric_reporter)

        # Step the learning rate scheduler(s)
        assert eval_metric is not None
        state.scheduler.step_epoch(
            metrics=metric_reporter.get_model_select_metric(eval_metric),
            epoch=state.epoch,
        )

        # Did we train a better model?
        better_model = metric_reporter.compare_metric(
            eval_metric, state.best_model_metric)
        if better_model:
            self.update_best_model(state, train_config, eval_metric)
        if better_model or train_config.save_all_checkpoints:
            self.save_checkpoint(state, train_config)

    # NOTE(review): finalize() semantics are not visible here — presumably
    # it swaps in finalized weights (e.g. an averaging optimizer) and
    # returns truthy when it did; confirm against the optimizer class.
    # If it did, re-evaluate and possibly update/save the best model again.
    if self.optimizer.finalize():
        should_update_model = True
        eval_metric = None
        if self.config.do_eval:
            state.stage = Stage.EVAL
            model.eval(Stage.EVAL)
            print(f"start evaluating finalized state")
            with torch.no_grad():
                eval_metric = self.run_epoch(state, eval_data, metric_reporter)
                should_update_model = metric_reporter.compare_metric(
                    eval_metric, state.best_model_metric)
        if should_update_model:
            self.update_best_model(state, train_config, eval_metric)
        if should_update_model or train_config.save_all_checkpoints:
            self.save_checkpoint(state, train_config)
    # Only bother loading the best model for master worker
    if (rank == 0 and state.best_model_state is not None
            and self.config.load_best_model_after_train):
        self.load_best_model(state)

    return state.model, state.best_model_metric
def train(
    self,
    train_iter: BatchIterator,
    eval_iter: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    optimizer: torch.optim.Optimizer,
    scheduler: Scheduler = None,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and eval a model; the model states will be modified.

    This function iterates the epochs specified in config, and for each
    epoch:

    1. Train model using training data, aggregate and report training results
    2. Adjust learning rate if scheduler is specified
    3. Evaluate model using evaluation data
    4. Calculate metrics based on evaluation results and select best model

    Args:
        train_iter (BatchIterator): batch iterator of training data
        eval_iter (BatchIterator): batch iterator of evaluation data
        model (Model): model to be trained
        metric_reporter (MetricReporter): computes metrics based on training
            output and reports results to console, file, etc.
        train_config (PyTextConfig): training config
        optimizer (torch.optim.Optimizer): torch optimizer to be used
        scheduler (Scheduler): learning rate scheduler, default is None
        rank (int): only used in distributed training, the rank of the
            current training thread; evaluation-driven module saving and
            best-model loading only happen on rank 0

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    timer = time_utils.StageTimer()
    world_size = 1
    if cuda_utils.CUDA_ENABLED:
        model = model.cuda()
        world_size = cuda_utils.DISTRIBUTED_WORLD_SIZE
        if world_size > 1:
            device_id = torch.cuda.current_device()
            model = DistributedModel(
                module=model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
            )
            timer.add_stage(stage="init_distributed_model")
    best_metric = None
    last_best_epoch = 0
    timer.add_stage(stage="pre_training")
    if scheduler:
        scheduler.prepare(train_iter, self.config.epochs)

    def training_pre_batch_callback():
        # Zero gradients before each batch.
        if world_size > 1:
            # replace optimizer.zero_grad() here to work with DDP
            # in cases where some parameters don't receive grads at each step
            # loss.backward will set grad for params in the computation graph
            # we can thus follow which params are left out and call .backward
            # on them manually
            for p in model.parameters():
                if p.grad is not None:
                    p.grad.detach_()
                    p.grad = None
        else:
            optimizer.zero_grad()

    def training_backprop(loss, timer=None):
        # Backward pass, optional per-batch LR step, gradient clipping,
        # and optimizer step. Returns the pre-clip grad norm (or None).
        timer = timer or time_utils.StageTimer()
        loss.backward()
        if world_size > 1:
            # DDP fix when some parameters don't receive grads
            for p in model.parameters():
                if p.requires_grad and p.grad is None:
                    p.backward(torch.zeros_like(p.data))
        timer.add_stage("backward")
        if scheduler:
            scheduler.step_batch()
        if self.config.max_clip_norm is not None:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), self.config.max_clip_norm)
        else:
            grad_norm = None
        optimizer.step()
        timer.add_stage("update_grads")
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    time_start = time.time()
    best_model_state = None
    for epoch in range(1, self.config.epochs + 1):
        sys.stdout.flush()
        # Stop early if (projected) time for one more epoch would exceed
        # the configured wall-clock budget; projection uses the mean
        # duration of the epochs completed so far.
        if self.config.target_time_limit_seconds > 0 and epoch > 1:
            time_elapsed = time.time() - time_start
            mean_epoch_time = time_elapsed / float(epoch - 1)
            expected_next_epoch_time = time_elapsed + mean_epoch_time
            if expected_next_epoch_time > self.config.target_time_limit_seconds:
                print(
                    f"Training stopped after {epoch - 1} epochs and "
                    f"{int(time_elapsed)} seconds, due to the target max training "
                    f"time of {self.config.target_time_limit_seconds} seconds."
                )
                break

        print(f"Rank {rank} worker: Starting epoch #{epoch}")
        model.train()
        lrs = (str(lr) for lr in learning_rates(optimizer))
        print(f"Learning rate(s): {', '.join(lrs)}")
        self._run_epoch(
            Stage.TRAIN,
            epoch,
            train_iter,
            model,
            metric_reporter,
            pre_batch=training_pre_batch_callback,
            backprop=training_backprop,
            rank=rank,
            num_samples_to_log_progress=self.config.
            num_samples_to_log_progress,
        )
        timer.add_stage(stage=f"epoch_train")

        if self.config.do_eval:
            model.eval(Stage.EVAL)
            with torch.no_grad():
                eval_metric = self._run_epoch(
                    Stage.EVAL,
                    epoch,
                    eval_iter,
                    model,
                    metric_reporter,
                    rank=rank,
                    num_samples_to_log_progress=self.config.
                    num_samples_to_log_progress,
                )
                timer.add_stage(stage=f"epoch_eval")

            # Step the learning rate scheduler(s)
            if scheduler:
                assert eval_metric is not None
                scheduler.step_epoch(
                    metrics=metric_reporter.get_model_select_metric(
                        eval_metric),
                    epoch=epoch,
                )

            # choose best model.
            if metric_reporter.compare_metric(eval_metric, best_metric):
                last_best_epoch = epoch
                best_metric = eval_metric
                # Only rank = 0 trainer saves modules.
                if train_config.save_module_checkpoints and rank == 0:
                    model.save_modules(
                        base_path=train_config.modules_save_dir,
                        suffix=f"-ep{epoch}",
                    )
                if rank == 0:
                    print(f"Rank {rank} worker: Found a better model!")
                    model_state = model.state_dict()
                    # save to cpu to avoid multiple model copies in gpu memory
                    if cuda_utils.CUDA_ENABLED:
                        for key, state in model_state.items():
                            model_state[key] = state.cpu()
                    best_model_state = model_state
                timer.add_stage(stage=f"epoch_save/load_module")

            if self.config.early_stop_after > 0 and (
                    epoch - last_best_epoch == self.config.early_stop_after):
                print(
                    f"Rank {rank} worker: Eval metric hasn't changed for "
                    + f"{self.config.early_stop_after} epochs. Stopping now."
                )
                break

    # Restore the best snapshot (kept on CPU above) before returning.
    if rank == 0 and best_model_state is not None:
        if cuda_utils.CUDA_ENABLED:
            for key, state in best_model_state.items():
                best_model_state[key] = state.cuda()
        model.load_state_dict(best_model_state)
    timer.report("Trainer train timer")
    return model, best_metric
def train(
    self,
    training_data: BatchIterator,
    eval_data: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and evaluate a model; the model state is modified in place.

    Repeats until ``continue_training`` says to stop: run one training
    epoch; then, when ``config.do_eval`` is set, run one evaluation epoch,
    step the LR scheduler on the eval metric, and record/checkpoint the
    best model seen so far.

    Args:
        training_data (BatchIterator): batch iterator of training data
        eval_data (BatchIterator): batch iterator of evaluation data
        model (Model): model to be trained
        metric_reporter (MetricReporter): computes metrics based on epoch
            output and reports results to console, file, etc.
        train_config (PyTextConfig): training config
        rank (int): only used in distributed training, the rank of the
            current training thread; best-model loading only happens on
            rank 0

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    state = TrainingState(
        model=model,
        optimizer=self.optimizer,
        scheduler=self.scheduler,
        rank=rank,
    )
    self.set_up_training(state, training_data)

    while self.continue_training(state):
        state.epoch += 1
        state.epochs_since_last_improvement += 1
        print(f"Worker {state.rank} starting epoch {state.epoch}", flush=True)
        current_rates = learning_rates(state.optimizer)
        print(f"Learning rate(s): {', '.join(map(str, current_rates))}")

        # --- training pass ---
        with timing.time("train epoch"):
            state.stage = Stage.TRAIN
            state.model.train()
            print(f"start training epoch {state.epoch}", flush=True)
            self.run_epoch(state, training_data, metric_reporter)

        # --- evaluation pass + model selection (optional) ---
        if self.config.do_eval:
            with timing.time("eval epoch"):
                state.stage = Stage.EVAL
                model.eval(Stage.EVAL)
                print(f"start evaluating epoch {state.epoch}", flush=True)
                with torch.no_grad():
                    eval_metric = self.run_epoch(
                        state, eval_data, metric_reporter)

            # Step the learning rate scheduler(s)
            assert eval_metric is not None
            state.scheduler.step_epoch(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=state.epoch,
            )

            # Did we train a better model?
            found_better = metric_reporter.compare_metric(
                eval_metric, state.best_model_metric)
            if found_better:
                state.epochs_since_last_improvement = 0
                state.best_model_metric = eval_metric
                self.save_checkpoint(state, train_config)

    # Only bother loading the best model for master worker
    if rank == 0 and state.best_model_state is not None:
        self.load_best_model(state)

    return state.model, state.best_model_metric
def run_epoch(self, state: TrainingState, data: BatchIterator,
              metric_reporter: MetricReporter):
    """
    Run one epoch over ``data`` for the stage recorded in ``state``.

    During training, batches are grouped into
    ``config.num_accumulated_batches`` before each ``run_step`` call;
    eval/test stages step one batch at a time.

    Args:
        state (TrainingState): current training state (stage, model, epoch,
            rank, optimizer)
        data (BatchIterator): batches for this epoch
        metric_reporter (MetricReporter): aggregates step output and
            reports metrics

    Returns:
        The reported metrics, or None when metric reporting is disabled
        for this stage.

    Raises:
        ValueError: if metric reporting is enabled but ``data`` yielded
            no batches.
    """
    # This method is due for some refactoring, pushing it off because it
    # interacts with the metric reporter too much. Much of the logic here
    # either changes in the NewTaskTrainer or should change with a better
    # metric reporter design.
    report_metric = state.stage != Stage.TRAIN or self.config.report_train_metrics
    model = state.model
    samples = []
    is_data_empty = True

    # Sometimes, a batch of inputs is too large to fit into GPU, and has to
    # be split into several micro-batches. However, to improve efficiency,
    # it is helpful to only apply params/gradients sync at original batch
    # boundaries instead of micro-batch boundaries. num_accumulated_batches
    # specifies the number of batches whose gradients are accumulated
    # locally before syncing; total training_batch_size =
    # train_batch_size x num_accumulated_batches, which improves system
    # performance by reducing total network transfer bytes.
    # (Previously this text was a free-floating string literal — an
    # expression statement evaluated and discarded on every call — now a
    # real comment.)
    for sample in enumerate(data):
        is_data_empty = False
        samples.append(sample)
        # Only the TRAIN stage accumulates; other stages step per batch.
        if (state.stage != Stage.TRAIN
                or len(samples) == self.config.num_accumulated_batches):
            self.run_step(samples, state, metric_reporter, report_metric)
            samples = []
    if samples:
        # Flush the final, possibly-partial accumulation group.
        self.run_step(samples, state, metric_reporter, report_metric)

    metrics = None
    if report_metric:
        if is_data_empty:
            error_msg = (
                f"Trying to report metric for stage {state.stage}, but no data was "
                "found. Either disable metric reporting for this stage, pass in "
                "non-empty data, or see if data fields are misnamed (warnings "
                "would appear in preceding stdout logs).")
            raise ValueError(error_msg)
        if hasattr(model, "get_model_size") and state.stage != Stage.TRAIN:
            metric_reporter.all_context[
                "current_model_parameter_size"] = model.get_model_size()
        with timing.time("report metrics"):
            metrics = metric_reporter.report_metric(
                model,
                state.stage,
                state.epoch,
                print_to_channels=(state.rank == 0),
                optimizer=getattr(
                    state, "optimizer", None
                ),  # optimizer is not present during test
            )
    else:
        metric_reporter._reset()

    if state.rank == 0 and self.config.sparsifier:
        current_sparsity = self.sparsifier.get_current_sparsity(state.model)
        print(f"sparsity in the model: {current_sparsity}")
    return metrics
def train(
    self,
    train_iter: BatchIterator,
    eval_iter: BatchIterator,
    model: Model,
    metric_reporter: MetricReporter,
    train_config: PyTextConfig,
    optimizer: torch.optim.Optimizer,
    scheduler=None,
    rank: int = 0,
) -> Tuple[torch.nn.Module, Any]:
    """
    Train and eval a model; the model states will be modified.

    This function iterates the epochs specified in config, and for each
    epoch:

    1. Train model using training data, aggregate and report training results
    2. Adjust learning rate if scheduler is specified
    3. Evaluate model using evaluation data
    4. Calculate metrics based on evaluation results and select best model

    Args:
        train_iter (BatchIterator): batch iterator of training data
        eval_iter (BatchIterator): batch iterator of evaluation data
        model (Model): model to be trained
        metric_reporter (MetricReporter): computes metrics based on training
            output and reports results to console, file, etc.
        train_config (PyTextConfig): training config
        optimizer (torch.optim.Optimizer): torch optimizer to be used
        scheduler (Optional[torch.optim.lr_scheduler]): learning rate
            scheduler, default is None
        rank (int): only used in distributed training, the rank of the
            current training thread; module saving and best-model loading
            only happen on rank 0

    Returns:
        model, best_metric: the trained model together with the best metric
    """
    if cuda_utils.CUDA_ENABLED:
        model = model.cuda()
        if cuda_utils.DISTRIBUTED_WORLD_SIZE > 1:
            device_id = torch.cuda.current_device()
            model = DistributedModel(
                module=model,
                device_ids=[device_id],
                output_device=device_id,
                broadcast_buffers=False,
            )

    best_metric = None
    last_best_epoch = 0
    # BUGFIX: best_model_state was never initialized, so the rank-0 restore
    # at the end raised NameError whenever no better model was ever recorded
    # (e.g. config.epochs == 0). Initialize and guard, mirroring the sibling
    # trainer implementation in this file.
    best_model_state = None
    scheduler = self._prepare_scheduler(train_iter, scheduler)

    def training_pre_batch_callback():
        # Clear accumulated gradients before each batch.
        optimizer.zero_grad()

    def training_backprop(loss):
        # Backward pass, optional per-batch LR step, gradient clipping,
        # then optimizer step.
        loss.backward()
        if scheduler:
            scheduler.step_batch()
        if self.config.max_clip_norm is not None:
            grad_norm = torch.nn.utils.clip_grad_norm_(
                model.parameters(), self.config.max_clip_norm)
        else:
            grad_norm = None
        optimizer.step()
        # grad_norm could be used to check grads sync in distributed training
        return grad_norm

    for epoch in range(1, self.config.epochs + 1):
        print(f"Rank {rank} worker: Starting epoch #{epoch}")
        model.train()
        lrs = (str(lr) for lr in learning_rates(optimizer))
        print(f"Learning rate(s): {', '.join(lrs)}")
        self._run_epoch(
            Stage.TRAIN,
            epoch,
            train_iter,
            model,
            metric_reporter,
            pre_batch=training_pre_batch_callback,
            backprop=training_backprop,
            rank=rank,
        )
        model.eval(Stage.EVAL)
        with torch.no_grad():
            eval_metric = self._run_epoch(
                Stage.EVAL, epoch, eval_iter, model, metric_reporter,
                rank=rank)

        # Step the learning rate scheduler(s)
        if scheduler:
            assert eval_metric is not None
            scheduler.step(
                metrics=metric_reporter.get_model_select_metric(eval_metric),
                epoch=epoch,
            )

        # choose best model.
        if metric_reporter.compare_metric(eval_metric, best_metric):
            last_best_epoch = epoch
            best_metric = eval_metric
            # Only rank = 0 trainer saves modules.
            if train_config.save_module_checkpoints and rank == 0:
                model.save_modules(
                    base_path=train_config.modules_save_dir,
                    suffix=f"-ep{epoch}")
            if rank == 0:
                print(f"Rank {rank} worker: Found a better model!")
                model_state = model.state_dict()
                # save to cpu to avoid multiple model copies in gpu memory
                if cuda_utils.CUDA_ENABLED:
                    for key, state in model_state.items():
                        model_state[key] = state.cpu()
                best_model_state = model_state

        if self.config.early_stop_after > 0 and (
                epoch - last_best_epoch == self.config.early_stop_after):
            print(f"Rank {rank} worker: Eval metric hasn't changed for " +
                  f"{self.config.early_stop_after} epochs. Stopping now.")
            break
        sys.stdout.flush()

    # Restore the best snapshot (kept on CPU above) before returning;
    # skip if no better model was ever found (see BUGFIX above).
    if rank == 0 and best_model_state is not None:
        if cuda_utils.CUDA_ENABLED:
            for key, state in best_model_state.items():
                best_model_state[key] = state.cuda()
        model.load_state_dict(best_model_state)
    return model, best_metric