class PredictionEpochLoop(Loop):
    """Loop performing prediction on arbitrary sequentially used dataloaders."""

    def __init__(self) -> None:
        super().__init__()
        self.return_predictions = False
        self.predictions: List[Any] = []
        self.current_batch_indices: List[int] = []
        self.batch_progress = Progress()

        self._dl_max_batches = 0
        self._num_dataloaders = 0
        self._warning_cache = WarningCache()
        self._seen_batch_indices: List[List[int]] = []

    @property
    def done(self) -> bool:
        """Ends prediction when the iteration count exceeds the total number of available batches."""
        return self.batch_progress.current.completed >= self._dl_max_batches

    @property
    def should_store_predictions(self) -> bool:
        """Whether the predictions should be stored for later usage (e.g. aggregation or returning)."""
        any_pred = any(cb.interval.on_epoch for cb in self.trainer.prediction_writer_callbacks)
        return self.return_predictions or any_pred

    def connect(self, **kwargs: "Loop") -> None:
        raise NotImplementedError(f"{self.__class__.__name__} does not connect any child loops.")

    def reset(self) -> None:
        """Resets the loop's internal state."""
        self._seen_batch_indices = []
        self.predictions = []
        self.batch_progress.reset_on_run()

    def on_run_start(  # type: ignore[override]
        self,
        dataloader_iter: Iterator,
        dataloader_idx: int,
        dl_max_batches: int,
        num_dataloaders: int,
    ) -> None:
        """Prepares the loop's internal state.

        Args:
            dataloader_iter: the iterator over the current dataloader
            dataloader_idx: the index of the current dataloader
            dl_max_batches: the maximum number of batches the current loader can produce
            num_dataloaders: the total number of dataloaders
        """
        void(dataloader_iter, dataloader_idx)
        self._dl_max_batches = dl_max_batches
        self._num_dataloaders = num_dataloaders
        # this call requires that `self.return_predictions` is set
        self._seen_batch_indices = self._get_batch_indices(dataloader_idx) if self.should_store_predictions else []

    def advance(  # type: ignore[override]
        self,
        dataloader_iter: Iterator,
        dataloader_idx: int,
        dl_max_batches: int,
        num_dataloaders: int,
    ) -> None:
        """Runs one prediction step.

        Args:
            dataloader_iter: the iterator over the current dataloader
            dataloader_idx: the index of the current dataloader
            dl_max_batches: the maximum number of batches the current loader can produce
            num_dataloaders: the total number of dataloaders
        """
        action_name = f"[{self.__class__.__name__}].predict_dataloader_idx_{dataloader_idx}_next"
        with self.trainer.profiler.profile(action_name):
            batch_idx, batch = next(dataloader_iter)
        self._seen_batch_indices = self._get_batch_indices(dataloader_idx) if self.should_store_predictions else []
        # we need to truncate the list of batch indices due to prefetching in the dataloader and Lightning
        self._seen_batch_indices = self._seen_batch_indices[: (self.batch_progress.current.completed + 1)]

        if batch is None:
            raise StopIteration

        batch = self.trainer._call_strategy_hook("batch_to_device", batch, dataloader_idx=dataloader_idx)

        self.batch_progress.increment_ready()

        self._predict_step(batch, batch_idx, dataloader_idx)

    def on_run_end(self) -> Tuple[List[Any], List[List[int]]]:
        """Returns the predictions and the corresponding batch indices."""
        predictions, all_batch_indices = self.predictions, self._seen_batch_indices
        self.predictions, self._seen_batch_indices = [], []  # free memory
        return predictions, all_batch_indices

    def _predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        """Runs the actual predict step together with all the necessary bookkeeping and the hooks tied to the
        predict step.

        Args:
            batch: the current batch to run the prediction on
            batch_idx: the index of the current batch
            dataloader_idx: the index of the dataloader producing the current batch
        """
        # configure step_kwargs
        step_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx)

        # extract batch_indices and store them
        batch_indices = self._get_batch_indices(dataloader_idx)
        self.current_batch_indices = batch_indices[batch_idx] if batch_indices else []

        self.trainer._call_callback_hooks("on_predict_batch_start", batch, batch_idx, dataloader_idx)
        self.trainer._call_lightning_module_hook("on_predict_batch_start", batch, batch_idx, dataloader_idx)

        self.batch_progress.increment_started()

        predictions = self.trainer._call_strategy_hook("predict_step", *step_kwargs.values())

        self.batch_progress.increment_processed()

        if predictions is None:
            self._warning_cache.warn("predict returned None. If it was on purpose, ignore this warning...")

        self.trainer._call_callback_hooks("on_predict_batch_end", predictions, batch, batch_idx, dataloader_idx)
        self.trainer._call_lightning_module_hook("on_predict_batch_end", predictions, batch, batch_idx, dataloader_idx)

        self.batch_progress.increment_completed()

        if self.should_store_predictions:
            self.predictions.append(move_data_to_device(predictions, torch.device("cpu")))

    def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: int) -> Dict[str, Any]:
        """Assembles the keyword arguments for the ``predict_step``.

        Args:
            batch: the current batch to run the prediction on
            batch_idx: the index of the current batch
            dataloader_idx: the index of the dataloader producing the current batch

        Returns:
            the dictionary containing all the keyword arguments for the predict step
        """
        step_kwargs = OrderedDict([("batch", batch), ("batch_idx", batch_idx)])
        if self._num_dataloaders > 1:
            step_kwargs["dataloader_idx"] = dataloader_idx
        return step_kwargs

    def _get_batch_indices(self, dataloader_idx: int) -> List[List[int]]:
        """Returns a reference to the seen batch indices if the dataloader has a batch sampler wrapped by our
        :class:`~pytorch_lightning.overrides.distributed.IndexBatchSamplerWrapper`."""
        # the batch_sampler is not defined in the case of CombinedDataLoaders
        batch_sampler = getattr(
            self.trainer.predict_dataloaders[dataloader_idx],  # type: ignore[has-type]
            "batch_sampler",
            None,
        )
        if isinstance(batch_sampler, IndexBatchSamplerWrapper):
            return batch_sampler.seen_batch_indices
        self._warning_cache.warn("Lightning couldn't infer the indices fetched for your dataloader.")
        return []
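The ``_build_kwargs`` helper above only adds ``dataloader_idx`` when more than one prediction dataloader is used, so a user-side ``predict_step`` only needs the extra argument in the multi-dataloader case. A minimal, hypothetical sketch (the ``LitModel`` name and layer sizes are illustrative, not part of the loop above):

import pytorch_lightning as pl
from torch import nn


class LitModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(32, 2)

    # single predict dataloader: the loop passes only `batch` and `batch_idx`
    def predict_step(self, batch, batch_idx):
        return self.layer(batch)

    # multiple predict dataloaders: the loop also passes `dataloader_idx`,
    # so the signature would become
    # def predict_step(self, batch, batch_idx, dataloader_idx): ...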
class WandbLogger(LightningLoggerBase): r""" Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip: .. code-block:: bash pip install wandb Args: name: Display name for the run. save_dir: Path where data is saved (wandb dir by default). offline: Run offline (data can be streamed later to wandb servers). id: Sets the version, mainly used to resume a previous run. version: Same as id. anonymous: Enables or explicitly disables anonymous logging. project: The name of the project to which this run will belong. log_model: Save checkpoints in wandb dir to upload on W&B servers. prefix: A string to put at the beginning of metric keys. sync_step: Sync Trainer step with wandb step. experiment: WandB experiment object. Automatically set when creating a run. \**kwargs: Additional arguments like `entity`, `group`, `tags`, etc. used by :func:`wandb.init` can be passed as keyword arguments in this logger. Example:: from pytorch_lightning.loggers import WandbLogger from pytorch_lightning import Trainer wandb_logger = WandbLogger() trainer = Trainer(logger=wandb_logger) Note: When logging manually through `wandb.log` or `trainer.logger.experiment.log`, make sure to use `commit=False` so the logging step does not increase. See Also: - `Tutorial <https://colab.research.google.com/drive/16d1uctGaw2y9KhGBlINNTsWpmlXdJwRW?usp=sharing>`__ on how to use W&B with PyTorch Lightning - `W&B Documentation <https://docs.wandb.ai/integrations/lightning>`__ """ LOGGER_JOIN_CHAR = '-' def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None, offline: Optional[bool] = False, id: Optional[str] = None, anonymous: Optional[bool] = False, version: Optional[str] = None, project: Optional[str] = None, log_model: Optional[bool] = False, experiment=None, prefix: Optional[str] = '', sync_step: Optional[bool] = True, **kwargs): if wandb is None: raise ImportError( 'You want to use `wandb` logger which is not installed yet,' # pragma: no-cover ' install it with `pip install wandb`.') if offline and log_model: raise MisconfigurationException( f'Providing log_model={log_model} and offline={offline} is an invalid configuration' ' since model checkpoints cannot be uploaded in offline mode.\n' 'Hint: Set `offline=False` to log your model.') super().__init__() self._name = name self._save_dir = save_dir self._offline = offline self._id = version or id self._anonymous = 'allow' if anonymous else None self._project = project self._log_model = log_model self._prefix = prefix self._sync_step = sync_step self._experiment = experiment self._kwargs = kwargs # logging multiple Trainer on a single W&B run (k-fold, resuming, etc) self._step_offset = 0 self.warning_cache = WarningCache() def __getstate__(self): state = self.__dict__.copy() # args needed to reload correct experiment state[ '_id'] = self._experiment.id if self._experiment is not None else None # cannot be pickled state['_experiment'] = None return state @property @rank_zero_experiment def experiment(self) -> Run: r""" Actual wandb object. To use wandb features in your :class:`~pytorch_lightning.core.lightning.LightningModule` do the following. 
Example:: self.logger.experiment.some_wandb_function() """ if self._experiment is None: if self._offline: os.environ['WANDB_MODE'] = 'dryrun' self._experiment = wandb.init( name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous, id=self._id, resume='allow', **self._kwargs) if wandb.run is None else wandb.run # offset logging step when resuming a run self._step_offset = self._experiment.step # save checkpoints in wandb dir to upload on W&B servers if self._save_dir is None: self._save_dir = self._experiment.dir return self._experiment def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100): self.experiment.watch(model, log=log, log_freq=log_freq) @rank_zero_only def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: params = self._convert_params(params) params = self._flatten_dict(params) params = self._sanitize_callable_params(params) self.experiment.config.update(params, allow_val_change=True) @rank_zero_only def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None: assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0' metrics = self._add_prefix(metrics) if self._sync_step and step is not None and step + self._step_offset < self.experiment.step: self.warning_cache.warn( 'Trying to log at a previous step. Use `WandbLogger(sync_step=False)`' ' or try logging with `commit=False` when calling manually `wandb.log`.' ) if self._sync_step: self.experiment.log( metrics, step=(step + self._step_offset) if step is not None else None) elif step is not None: self.experiment.log({ **metrics, 'trainer_step': (step + self._step_offset) }) else: self.experiment.log(metrics) @property def save_dir(self) -> Optional[str]: return self._save_dir @property def name(self) -> Optional[str]: # don't create an experiment if we don't have one return self._experiment.project_name( ) if self._experiment else self._name @property def version(self) -> Optional[str]: # don't create an experiment if we don't have one return self._experiment.id if self._experiment else self._id @rank_zero_only def finalize(self, status: str) -> None: # offset future training logged on same W&B run if self._experiment is not None: self._step_offset = self._experiment.step # upload all checkpoints from saving dir if self._log_model: wandb.save(os.path.join(self.save_dir, "*.ckpt"))
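A short usage sketch of the logger above; the run name, project and metric key are illustrative. It follows the Note in the class docstring: manual calls to ``wandb.log`` or ``experiment.log`` should pass ``commit=False`` so the logging step does not increase.

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger

# offline=True keeps everything local; the run can be synced to the W&B servers later
wandb_logger = WandbLogger(name="demo-run", project="my-project", offline=True)
trainer = Trainer(logger=wandb_logger)

# manual logging: commit=False prevents the wandb step from advancing
wandb_logger.experiment.log({"custom/metric": 0.5}, commit=False)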
class TrainLoop: def __init__(self, trainer, multiple_trainloader_mode): self.trainer = trainer self.early_stopping_accumulator = None self.checkpoint_accumulator = None self.accumulated_loss = None self.warning_cache = WarningCache() self._teardown_already_run = False self.running_loss = TensorRunningAccum(window_length=20) self.automatic_optimization = True self._curr_step_result = None self._cur_grad_norm_dict = None self._multiple_trainloader_mode = multiple_trainloader_mode self._skip_backward = False self.trainer._multiple_trainloader_mode = multiple_trainloader_mode def on_trainer_init( self, max_epochs, min_epochs, max_steps, min_steps, num_sanity_val_steps, weights_summary, ): self.trainer.global_step = 0 self.trainer.current_epoch = 0 self.trainer.should_stop = False self.trainer._state = TrainerState.INITIALIZING self.trainer.total_batch_idx = 0 self.trainer.batch_idx = 0 self.trainer.num_training_batches = 0 self.trainer.train_dataloader = None # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000 self.trainer.max_epochs = 1000 if ( max_epochs is None and max_steps is None) else max_epochs # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1 self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs self.trainer.max_steps = max_steps self.trainer.min_steps = min_steps if num_sanity_val_steps == -1: self.trainer.num_sanity_val_steps = float("inf") else: self.trainer.num_sanity_val_steps = num_sanity_val_steps self.trainer.weights_summary = weights_summary if weights_summary is not None and weights_summary not in ModelSummary.MODES: raise MisconfigurationException( f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}" ) @property def num_optimizers(self): num_optimizers = len(self.get_optimizers_iterable()) return num_optimizers def should_skip_training(self): should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0 def on_train_start(self): # hook self.trainer.call_hook("on_train_start") def setup_fit(self, model, train_dataloader=None, val_dataloaders=None, datamodule=None): # clean hparams if hasattr(model, "hparams"): parsing.clean_namespace(model.hparams) # links data to the trainer self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule) # check that model is configured correctly self.trainer.config_validator.verify_loop_configurations(model) # attach model log function to callback self.trainer.callback_connector.attach_model_logging_functions(model) def on_train_end(self): if self._teardown_already_run: return self._teardown_already_run = True # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates # when a checkpoint was saved at the last step self.trainer.global_step -= 1 self.check_checkpoint_callback(should_update=True, is_last=True) self.trainer.global_step += 1 # hook self.trainer.call_hook("on_train_end") # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers. 
# It might be related to xla tensors blocked when moving the cpu # kill loggers if self.trainer.logger is not None: self.trainer.logger.finalize("success") # summarize profile results self.trainer.profiler.describe() # give accelerators a chance to finish self.trainer.accelerator.on_train_end() # reset bookkeeping self.trainer._running_stage = None def check_checkpoint_callback(self, should_update, is_last=False): # TODO bake this logic into the ModelCheckpoint callback if should_update and self.trainer.checkpoint_connector.has_trained: callbacks = self.trainer.checkpoint_callbacks if is_last and any(cb.save_last and cb.verbose for cb in callbacks): rank_zero_info("Saving latest checkpoint...") model = self.trainer.lightning_module for cb in callbacks: cb.on_validation_end(self.trainer, model) def check_early_stopping_callback(self, should_update): # TODO bake this logic into the EarlyStopping callback if should_update and self.trainer.checkpoint_connector.has_trained: callbacks = [ c for c in self.trainer.callbacks if isinstance(c, EarlyStopping) ] model = self.trainer.lightning_module for cb in callbacks: cb.on_validation_end(self.trainer, model) def on_train_epoch_start(self, epoch): # update training progress in trainer self.trainer.current_epoch = epoch model = self.trainer.lightning_module # reset train dataloader if epoch != 0 and self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_train_dataloader(model) # todo: specify the possible exception with suppress(Exception): # set seed for distributed sampler (enables shuffling for each epoch) self.trainer.train_dataloader.sampler.set_epoch(epoch) # changing gradient according accumulation_scheduler self.trainer.accumulation_scheduler.on_epoch_start( self.trainer, self.trainer.lightning_module) # stores accumulated grad fractions per batch self.accumulated_loss = TensorRunningAccum( window_length=self.trainer.accumulate_grad_batches) # structured result accumulators for callbacks self.early_stopping_accumulator = Accumulator() self.checkpoint_accumulator = Accumulator() # hook self.trainer.call_hook("on_epoch_start") self.trainer.call_hook("on_train_epoch_start") def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx): # hook self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx) self.trainer.call_hook('on_batch_end') # figure out what to track for epoch end self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs) # reset batch logger internals self.trainer.logger_connector.on_train_batch_end() def reset_train_val_dataloaders(self, model): if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_train_dataloader(model) if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_val_dataloader(model) def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs): # track the outputs to reduce at the end of the epoch for opt_idx, opt_outputs in enumerate(batch_end_outputs): sample_output = opt_outputs[-1] # decide if we need to reduce at the end of the epoch automatically auto_reduce_tng_result = isinstance( sample_output, Result) and sample_output.should_reduce_on_epoch_end hook_overridden = (is_overridden( "training_epoch_end", model=self.trainer.lightning_module) or is_overridden( "on_train_epoch_end", model=self.trainer.lightning_module)) # only track when a) it needs to be autoreduced OR b) the user wants to 
manually reduce on epoch end if not (hook_overridden or auto_reduce_tng_result): continue # with 1 step (no tbptt) don't use a sequence at epoch end if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance( opt_outputs[0], Result): opt_outputs = opt_outputs[0] epoch_output[opt_idx].append(opt_outputs) def get_optimizers_iterable(self): """ Generates an iterable with (idx, optimizer) for each optimizer. """ if not self.trainer.optimizer_frequencies: # call training_step once per optimizer return list(enumerate(self.trainer.optimizers)) optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies) optimizers_loop_length = optimizer_freq_cumsum[-1] current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length # find optimzier index by looking for the first {item > current_place} in the cumsum list opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop) return [[opt_idx, self.trainer.optimizers[opt_idx]]] def on_after_backward(self, training_step_output, batch_idx, untouched_loss): is_result_obj = isinstance(training_step_output, Result) if is_result_obj: training_step_output = training_step_output.detach() else: training_step_output.batch_loss = training_step_output.batch_loss.detach( ) # insert after step hook self.trainer.call_hook("on_after_backward") # when in dev debugging track the losses self.trainer.dev_debugger.track_train_loss_history( batch_idx, untouched_loss.detach()) def _check_training_step_output(self, training_step_output): if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization: if training_step_output.grad_fn is None: # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ... raise MisconfigurationException( "In manual optimization, `training_step` should not return a Tensor" ) def training_step(self, split_batch, batch_idx, opt_idx, hiddens): # give the PL module a result for logging model_ref = self.trainer.lightning_module with self.trainer.profiler.profile("model_forward"): args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens) # manually capture logged metrics model_ref._current_fx_name = 'training_step' model_ref._results = Result() with self.trainer.profiler.profile("training_step"): training_step_output = self.trainer.accelerator.training_step( args) self.trainer.accelerator.post_training_step() self.trainer.logger_connector.cache_logged_metrics() self._check_training_step_output(training_step_output) training_step_output = self.trainer.call_hook( "training_step_end", training_step_output) training_step_output_for_epoch_end, training_step_output = self._process_training_step_output( training_step_output, split_batch) is_result_obj = isinstance(training_step_output, Result) if training_step_output_for_epoch_end is None: return None # enable empty loss when using manual opt closure_loss = None untouched_loss = None if self.automatic_optimization: # accumulate loss # (if accumulate_grad_batches = 1 no effect) if is_result_obj: closure_loss = training_step_output.minimize else: closure_loss = training_step_output.batch_loss closure_loss = closure_loss / self.trainer.accumulate_grad_batches # the loss will get scaled for amp. 
avoid any modifications to it untouched_loss = closure_loss.detach().clone() # result result = AttributeDict( closure_loss=closure_loss, loss=untouched_loss, training_step_output=training_step_output, training_step_output_for_epoch_end= training_step_output_for_epoch_end, hiddens=training_step_output.hiddens, ) return result def _process_training_step_output(self, training_step_output, split_batch): training_step_output_for_epoch_end = training_step_output # enable validation_step return None if training_step_output_for_epoch_end is None: return None, None # ----------------------------------------- # process hybrid (1.0) # ----------------------------------------- # no need for these checks in 1.0.0 # TODO: remove checks in 1.0.0 is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor) is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output) if is_1_0_output: return self._process_training_step_output_1_0( training_step_output, split_batch) # ----------------------------------------- # process old dict (deprecate 1.0) # ----------------------------------------- training_step_output = self.trainer.process_dict_result( training_step_output, train=True) training_step_output = AttributeDict( batch_loss=training_step_output[0], pbar_on_batch_end=training_step_output[1], log_metrics=training_step_output[2], callback_metrics=training_step_output[3], hiddens=training_step_output[4], ) # if the user decides to finally reduce things in epoch_end, save raw output without graphs if isinstance(training_step_output_for_epoch_end, torch.Tensor): training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach( ) else: training_step_output_for_epoch_end = recursive_detach( training_step_output_for_epoch_end) return training_step_output_for_epoch_end, training_step_output def _process_training_step_output_1_0(self, training_step_output, split_batch): result = self.trainer.lightning_module._results loss = None hiddens = None # handle dict return if isinstance(training_step_output, dict): loss = training_step_output.pop("loss", None) hiddens = training_step_output.pop("hiddens", None) result["extra"] = training_step_output # handle scalar return elif isinstance(training_step_output, torch.Tensor): loss = training_step_output result["extra"] = {} # map to results under the hood result.minimize = loss result.hiddens = hiddens # track batch for manual reduction with result result.track_batch_size(len(split_batch)) # track metrics without grads for epoch reduction training_step_output_for_epoch_end = copy(result) training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach( ) if self.trainer.move_metrics_to_cpu: training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu( ) # what flows back into the system training_step_output = result return training_step_output_for_epoch_end, training_step_output def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure): model_ref = self.trainer.lightning_module is_lbfgs = isinstance(optimizer, torch.optim.LBFGS) using_native_amp = self.trainer.amp_backend == AMPType.NATIVE # native amp + lbfgs is a no go right now if using_native_amp and is_lbfgs: raise MisconfigurationException( 'native PyTorch amp and lbfgs are not compatible.' 
' To request, please file a Github issue in PyTorch and tag @mcarilli' ) # wraps into LightningOptimizer only for running step optimizer = LightningOptimizer._to_lightning_optimizer( optimizer, self.trainer, opt_idx) # model hook model_ref.optimizer_step( self.trainer.current_epoch, batch_idx, optimizer, opt_idx, train_step_and_backward_closure, on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE, using_native_amp=using_native_amp, using_lbfgs=is_lbfgs, ) def on_before_zero_grad(self, optimizer): self.trainer.call_hook('on_before_zero_grad', optimizer) def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx): self.trainer.accelerator.optimizer_zero_grad( self.trainer.current_epoch, batch_idx, optimizer, opt_idx) def track_and_norm_grad(self, optimizer): # track gradient norms grad_norm_dic = self._track_gradient_norm() # clip gradients self.trainer.accelerator.clip_gradients(optimizer, self.trainer.gradient_clip_val) self._cur_grad_norm_dict = grad_norm_dic def _track_gradient_norm(self): grad_norm_dict = {} if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0: if float(self.trainer.track_grad_norm) > 0: model = self.trainer.lightning_module grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm) return grad_norm_dict def process_hiddens(self, opt_closure_result): hiddens = opt_closure_result.hiddens if isinstance(opt_closure_result.training_step_output, Result): opt_closure_result.training_step_output_for_epoch_end.drop_hiddens( ) return hiddens def tbptt_split_batch(self, batch): splits = [batch] if self.trainer.truncated_bptt_steps is not None: model_ref = self.trainer.lightning_module with self.trainer.profiler.profile("tbptt_split_batch"): splits = model_ref.tbptt_split_batch( batch, self.trainer.truncated_bptt_steps) return splits def run_training_epoch(self): # modify dataloader if needed (ddp, etc...) train_dataloader = self.trainer.accelerator.process_dataloader( self.trainer.train_dataloader) # track epoch output epoch_output = [[] for _ in range(self.num_optimizers)] train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader( train_dataloader) dataloader_idx = 0 val_loop_called = False for batch_idx, (batch, is_last_batch) in train_dataloader: self.trainer.batch_idx = batch_idx # ------------------------------------ # TRAINING_STEP + TRAINING_STEP_END # ------------------------------------ with self.trainer.profiler.profile("run_training_batch"): batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx) # when returning -1 from train_step, we end epoch early if batch_output.signal == -1: break batch_end_outputs = self.process_train_step_outputs( batch_output.training_step_output_for_epoch_end, self.early_stopping_accumulator, self.checkpoint_accumulator, ) # hook # TODO: add outputs to batches self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx) # ----------------------------------------- # SAVE METRICS TO LOGGERS # ----------------------------------------- self.trainer.logger_connector.log_train_step_metrics(batch_output) # ----------------------------------------- # VALIDATE IF NEEDED + CHECKPOINT CALLBACK # ----------------------------------------- should_check_val = self.should_check_val_fx( batch_idx, is_last_batch) if should_check_val: self.trainer.validating = True self.trainer.run_evaluation() self.trainer.training = True val_loop_called = True # ----------------------------------------- # SAVE LOGGERS (ie: Tensorboard, etc...) 
# ----------------------------------------- self.save_loggers_on_train_batch_end() # update LR schedulers monitor_metrics = deepcopy( self.trainer.logger_connector.callback_metrics) self.update_train_loop_lr_schedulers( monitor_metrics=monitor_metrics) self.trainer.checkpoint_connector.has_trained = True # max steps reached, end training if (self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1 and self._accumulated_batches_reached()): break # end epoch early # stop when the flag is changed or we've gone past the amount # requested in the batches if self.trainer.should_stop: break self.trainer.total_batch_idx += 1 # stop epoch if we limited the number of training batches if self._num_training_batches_reached(is_last_batch): break # progress global step according to grads progress self.increment_accumulated_grad_global_step() # epoch end hook self.run_on_epoch_end_hook(epoch_output) # log epoch metrics self.trainer.logger_connector.log_train_epoch_end_metrics( epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers) should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True) should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation( self.trainer.num_val_batches) should_train_only = self.trainer.disable_validation or should_skip_eval # update epoch level lr_schedulers if no val loop outside train loop is triggered if (val_loop_called and not should_check_val) or should_train_only: self.trainer.optimizer_connector.update_learning_rates( interval='epoch') if should_train_only: self.check_checkpoint_callback(True) self.check_early_stopping_callback(True) if should_check_val: self.trainer.validating = True self.trainer.run_evaluation(on_epoch=True) self.trainer.training = True # increment the global step once # progress global step according to grads progress self.increment_accumulated_grad_global_step() def run_training_batch(self, batch, batch_idx, dataloader_idx): # track grad norms grad_norm_dic = {} # bookkeeping self.trainer.hiddens = None # track all outputs across time and num of optimizers batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))] if batch is None: return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic) # hook response = self.trainer.call_hook("on_batch_start") if response == -1: return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic) # hook response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx) if response == -1: return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic) # lightning module hook splits = self.tbptt_split_batch(batch) for split_idx, split_batch in enumerate(splits): # create an iterable for optimizers and loop over them for opt_idx, optimizer in self.prepare_optimizers(): # toggle model params + set info to logger_connector self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer) if self.should_accumulate(): # For gradient accumulation # ------------------- # calculate loss (train step + train step end) # ------------------- # automatic_optimization=True: perform dpp sync only when performing optimizer_step # automatic_optimization=False: don't block synchronization here with self.block_ddp_sync_behaviour(): self.training_step_and_backward( split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens) batch_outputs = self._process_closure_result( batch_outputs=batch_outputs, opt_idx=opt_idx, ) # ------------------------------ # BACKWARD PASS # 
------------------------------ # gradient update with accumulated gradients else: if self.automatic_optimization: def train_step_and_backward_closure(): result = self.training_step_and_backward( split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens) return None if result is None else result.loss # optimizer step self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure) else: self._curr_step_result = self.training_step( split_batch, batch_idx, opt_idx, self.trainer.hiddens) if self._curr_step_result is None: # user decided to skip optimization # make sure to zero grad. continue batch_outputs = self._process_closure_result( batch_outputs=batch_outputs, opt_idx=opt_idx, ) # todo: Properly aggregate grad_norm accros opt_idx and split_idx grad_norm_dic = self._cur_grad_norm_dict self._cur_grad_norm_dict = None # update running loss + reset accumulated loss self.update_running_loss() result = AttributeDict( signal=0, grad_norm_dic=grad_norm_dic, training_step_output_for_epoch_end=batch_outputs, ) return result @contextmanager def block_ddp_sync_behaviour(self, should_block_sync: bool = False): """ automatic_optimization = True Blocks ddp sync gradients behaviour on backwards pass. This is useful for skipping sync when accumulating gradients, reducing communication overhead automatic_optimization = False do not block ddp gradient sync when using manual optimization as gradients are needed within the training step Returns: context manager with sync behaviour off """ if (isinstance(self.trainer.training_type_plugin, ParallelPlugin) and (self.automatic_optimization or should_block_sync)): with self.trainer.training_type_plugin.block_backward_sync(): yield None else: yield None def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list: opt_closure_result = self._curr_step_result if opt_closure_result is not None: # cache metrics self.trainer.logger_connector.cache_training_step_metrics( opt_closure_result) # track hiddens self.trainer.hiddens = self.process_hiddens(opt_closure_result) # check if loss or model weights are nan if self.trainer.terminate_on_nan: self.trainer.detect_nan_tensors(opt_closure_result.loss) # track all the outputs across all steps batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0 batch_outputs[batch_opt_idx].append( opt_closure_result.training_step_output_for_epoch_end) if self.automatic_optimization: # track total loss for logging (avoid mem leaks) self.accumulated_loss.append(opt_closure_result.loss) self._curr_step_result = None return batch_outputs def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens): """ wrap the forward step in a closure so second order methods work """ with self.trainer.profiler.profile("training_step_and_backward"): # lightning module hook result = self.training_step(split_batch, batch_idx, opt_idx, hiddens) self._curr_step_result = result if not self._skip_backward and self.automatic_optimization: is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0 if is_first_batch_to_accumulate: self.on_before_zero_grad(optimizer) self.optimizer_zero_grad(batch_idx, optimizer, opt_idx) # backward pass if result is not None: with self.trainer.profiler.profile("model_backward"): self.backward(result, optimizer, opt_idx) # hook - call this hook only # when gradients have finished to accumulate if not self.should_accumulate(): self.on_after_backward(result.training_step_output, batch_idx, result.loss) # check if loss or model weights are 
nan if self.trainer.terminate_on_nan: self.trainer.detect_nan_tensors(result.loss) else: self.warning_cache.warn( "training_step returned None if it was on purpose, ignore this warning..." ) if len(self.trainer.optimizers) > 1: # revert back to previous state self.trainer.lightning_module.untoggle_optimizer(opt_idx) return result def backward(self, result, optimizer, opt_idx, *args, **kwargs): self.trainer.dev_debugger.track_event("backward_call") should_accumulate = self.should_accumulate() # backward can be called manually in the training loop if isinstance(result, torch.Tensor): self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs) else: result.closure_loss = self.trainer.accelerator.backward( result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs) if not self.should_accumulate(): # track gradients self.track_and_norm_grad(optimizer=optimizer) def update_train_loop_lr_schedulers(self, monitor_metrics=None): num_accumulated_batches_reached = self._accumulated_batches_reached() num_training_batches_reached = self._num_training_batches_reached() if num_accumulated_batches_reached or num_training_batches_reached: # update lr self.trainer.optimizer_connector.update_learning_rates( interval="step", monitor_metrics=monitor_metrics) def run_on_epoch_end_hook(self, epoch_output): # inform logger the batch loop has finished self.trainer.logger_connector.on_train_epoch_end() self.trainer.call_hook('on_train_epoch_end', epoch_output) self.trainer.call_hook('on_epoch_end') def increment_accumulated_grad_global_step(self): num_accumulated_batches_reached = self._accumulated_batches_reached() num_training_batches_reached = self._num_training_batches_reached() # progress global step according to grads progress if num_accumulated_batches_reached or num_training_batches_reached: self.trainer.global_step += 1 def _accumulated_batches_reached(self): return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0 def _num_training_batches_reached(self, is_last_batch=False): return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch def should_accumulate(self): # checks if backward or backward + optimizer step (via closure) accumulation_done = self._accumulated_batches_reached() is_final_batch = self._num_training_batches_reached() return not (accumulation_done or is_final_batch) def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False): # decide if we should run validation is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0 is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0 can_check_val = self.trainer.enable_validation and is_val_check_epoch is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float( "inf") epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0 should_check_val = ( (is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop or is_last_batch_for_infinite_dataset ) if on_epoch else (is_val_check_batch and not epoch_end_val_check) return should_check_val and can_check_val def build_train_args(self, batch, batch_idx, opt_idx, hiddens): # enable not needing to add opt_idx to training_step args = [batch, batch_idx] if len(self.trainer.optimizers) > 1: if self.trainer.has_arg("training_step", "optimizer_idx"): if not self.automatic_optimization: self.warning_cache.warn( "`training_step` hook signature has changed in v1.3." 
" `optimizer_idx` argument has been removed in case of manual optimization. Support for" " the old signature will be removed in v1.5", DeprecationWarning) args.append(opt_idx) elif not self.trainer.has_arg( "training_step", "optimizer_idx") and self.automatic_optimization: raise ValueError( f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but" ' `training_step` is missing the `optimizer_idx` argument.' ) # pass hiddens if using tbptt if self.trainer.truncated_bptt_steps is not None: args.append(hiddens) return args def save_loggers_on_train_batch_end(self): # when loggers should save to disk should_flush_logs = self.trainer.logger_connector.should_flush_logs if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None: self.trainer.logger.save() def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator): """ Figure out what needs to be tracked/logged at the end of the epoch """ # the training step outputs a list per optimizer. The list contains the outputs at each time step # when no TBPTT is used, then the list has 1 item per batch # when TBPTT IS used, then the list has n items (1 per time step) batch_end_outputs = [] for optimizer_idx_outputs in all_train_step_outputs: # extract one representative sample from each time step (1 if no tbptt) and 0th optimizer if len(optimizer_idx_outputs) == 0: continue sample_output = optimizer_idx_outputs[-1] # pull out callback info if available (ie: Results object) if isinstance(sample_output, dict) and "early_stop_on" in sample_output: early_stopping_accumulator.accumulate( sample_output["early_stop_on"]) if isinstance(sample_output, dict) and "checkpoint_on" in sample_output: checkpoint_accumulator.accumulate( sample_output["checkpoint_on"]) batch_end_outputs.append(optimizer_idx_outputs) return batch_end_outputs def prepare_optimizers(self): # in manual optimization we loop over all optimizers at once optimizers = self.get_optimizers_iterable() if not self.automatic_optimization: optimizers = [optimizers[0]] return optimizers def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer): # set split_idx to trainer for tracking self.trainer.split_idx = split_idx # make sure only the gradients of the current optimizer's parameters are calculated # in the training step to prevent dangling gradients in multiple-optimizer setup. if self.automatic_optimization and len(self.trainer.optimizers) > 1: model = self.trainer.lightning_module model.toggle_optimizer(optimizer, opt_idx) # use to track metrics internally self.trainer.logger_connector.on_train_split_start( split_idx, opt_idx, split_batch) def update_running_loss(self): accumulated_loss = self.accumulated_loss.mean() if accumulated_loss is not None: # calculate running loss for display self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches) # reset for next set of accumulated grads self.accumulated_loss.reset()
class PredictLoop(object):

    def __init__(self, trainer):
        self.trainer = trainer
        self.max_batches = None
        self.num_dataloaders = None
        self.warning_cache = WarningCache()
        self.batch_indices: Optional[List[int]] = None
        self.epoch_batch_indices: Optional[List[List[int]]] = None
        self.predictions: Optional[List[List[Any]]] = None
        # `DDPSpawnPlugin` plugins and derivatives don't support returning predictions.
        self._return_predictions: Optional[bool] = None
        self._previous_grad_status: Optional[bool] = None

    @property
    def return_predictions(self) -> bool:
        return self._return_predictions

    @return_predictions.setter
    def return_predictions(self, return_predictions: Optional[bool] = None) -> None:
        # ``DDPSpawnPlugin`` plugins and derivatives don't support returning predictions.
        is_ddp_spawn = isinstance(self.trainer.training_type_plugin, DDPSpawnPlugin)
        if return_predictions and is_ddp_spawn:
            raise MisconfigurationException(
                "`return_predictions` should be set to `False` when using the `DDPSpawnPlugin` or its subclasses. "
                f"Found {return_predictions} with training_type_plugin {type(self.trainer.training_type_plugin)}."
            )
        # For plugins other than ``DDPSpawnPlugin``, `return_predictions` is True by default unless the user decides otherwise.
        self._return_predictions = not is_ddp_spawn if return_predictions is None else return_predictions

    @property
    def should_store_predictions(self) -> bool:
        any_pred = any(cb.interval.on_epoch for cb in self.trainer.prediction_writer_callbacks)
        return self.return_predictions or any_pred

    def on_trainer_init(self):
        self.trainer.num_predict_batches = []
        self.trainer.predicted_ckpt_path = None

    def get_predict_dataloaders(self):
        self.trainer.reset_predict_dataloader(self.trainer.lightning_module)

        dataloaders = self.trainer.predict_dataloaders
        max_batches = self.trainer.num_predict_batches

        return dataloaders, max_batches

    def should_skip_predict(self, max_batches):
        return sum(max_batches) == 0

    def on_predict_model_eval(self):
        model_ref = self.trainer.lightning_module
        model_ref.on_predict_model_eval()

    def setup(self, max_batches, dataloaders):
        # convert max_batches to list
        if isinstance(max_batches, int):
            max_batches = [max_batches] * len(dataloaders)

        self.max_batches = max_batches
        self.num_dataloaders = self._get_num_dataloaders(dataloaders)
        self.predictions = [[] for _ in range(self.num_dataloaders)]
        self.epoch_batch_indices = [[] for _ in range(self.num_dataloaders)]

    def _get_num_dataloaders(self, dataloaders: List[DataLoader]) -> int:
        # case where user does:
        # return dl1, dl2
        length = len(dataloaders)
        if len(dataloaders) > 0 and isinstance(dataloaders[0], (list, tuple)):
            length = len(dataloaders[0])
        return length

    def _build_kwargs(self, batch, batch_idx, dataloader_idx):
        step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)])
        if self.num_dataloaders:
            step_kwargs['dataloader_idx'] = dataloader_idx
        return step_kwargs

    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
        # configure step_kwargs
        step_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx)

        # extract batch_indices and store them
        self._store_batch_indices(dataloader_idx)

        model_ref = self.trainer.lightning_module

        self.trainer.call_hook("on_predict_batch_start", batch, batch_idx, dataloader_idx)

        model_ref._current_fx_name = "predict_step"
        predictions = self.trainer.accelerator.predict_step(step_kwargs)

        if predictions is None:
            self.warning_cache.warn("predict returned None. If it was on purpose, ignore this warning...")

        self.trainer.call_hook("on_predict_batch_end", predictions, batch, batch_idx, dataloader_idx)

        if self.should_store_predictions:
            self.predictions[dataloader_idx].append(predictions)

    def _store_batch_indices(self, dataloader_idx: int) -> None:
        batch_sampler = self.trainer.predict_dataloaders[dataloader_idx].batch_sampler
        if isinstance(batch_sampler, IndexBatchSamplerWrapper):
            self.batch_indices = batch_sampler.batch_indices
            if self.should_store_predictions:
                self.epoch_batch_indices[dataloader_idx].append(batch_sampler.batch_indices)

    def on_predict_start(self) -> None:
        # enable eval mode + no grads
        self.on_predict_model_eval()
        self.trainer.lightning_module.zero_grad()
        self._previous_grad_status = torch.is_grad_enabled()
        torch.set_grad_enabled(False)

        # hook
        self.trainer.call_hook("on_predict_start")
        self.trainer.call_hook("on_predict_epoch_start")

    def on_predict_epoch_end(self) -> Optional[_PREDICT_OUTPUT]:
        self.trainer.profiler.describe()

        results = self.predictions

        self.trainer.call_hook("on_predict_epoch_end", results)

        if self.return_predictions:
            return results[0] if self.num_dataloaders == 1 else results

    def on_predict_end(self):
        # clear memory. the predictions are extracted in `on_predict_epoch_end`.
        self.predictions = None
        self.batch_indices = None

        # reset grad to its previous status.
        torch.set_grad_enabled(self._previous_grad_status)

        # hook
        self.trainer.call_hook("on_predict_end")
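As a usage sketch of the ``return_predictions`` behaviour enforced by the setter above (the dataloader and the ``LitModel`` from the earlier sketch are illustrative): single-process runs return predictions by default, while spawn-based DDP must keep ``return_predictions=False`` and persist outputs through a prediction-writer callback instead.

import torch
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer

predict_loader = DataLoader(torch.randn(64, 32), batch_size=8)

# default (non-spawn) case: predictions are gathered and returned
trainer = Trainer()
predictions = trainer.predict(LitModel(), dataloaders=predict_loader)

# spawn-based DDP cannot return predictions to the main process; keep
# `return_predictions=False` and write outputs via a prediction-writer callback
# trainer = Trainer(accelerator="ddp_spawn", gpus=2)
# trainer.predict(LitModel(), dataloaders=predict_loader, return_predictions=False)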
class EvaluationLoop(object): def __init__(self, trainer): self.trainer = trainer self.outputs = [] self.step_metrics = [] self.predictions = None self.max_batches = None self.warning_cache = WarningCache() self.num_dataloaders = None def on_trainer_init(self): self.trainer.num_val_batches = [] self.trainer.num_sanity_val_batches = [] self.trainer.num_test_batches = [] self.trainer.test_dataloaders = None self.trainer.val_dataloaders = None self.trainer.running_sanity_check = False # when .test() is called, it sets this self.trainer.tested_ckpt_path = None # when true, prints test results self.trainer.verbose_test = True def get_evaluation_dataloaders(self, max_batches): # select dataloaders model = self.trainer.lightning_module # select dataloaders if self.trainer.testing: self.trainer.reset_test_dataloader(model) dataloaders = self.trainer.test_dataloaders new_max_batches = self.trainer.num_test_batches else: # val in_sanity_check = self.trainer.running_sanity_check should_reload_every_epoch = self.trainer.reload_dataloaders_every_epoch if (self.trainer.val_dataloaders is None or should_reload_every_epoch) and not in_sanity_check: self.trainer.reset_val_dataloader(model) dataloaders = self.trainer.val_dataloaders new_max_batches = self.trainer.num_val_batches if max_batches is None: max_batches = new_max_batches return dataloaders, max_batches def should_skip_evaluation(self, max_batches): return sum(max_batches) == 0 def on_evaluation_start(self, *args, **kwargs): if self.trainer.testing: self.trainer.call_hook('on_test_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_start', *args, **kwargs) def on_evaluation_model_eval(self, *_, **__): model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_eval() else: model_ref.on_validation_model_eval() def on_evaluation_model_train(self, *_, **__): model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_train() else: model_ref.on_validation_model_train() def on_evaluation_end(self, *args, **kwargs): if self.trainer.testing: self.trainer.call_hook('on_test_end', *args, **kwargs) else: self.trainer.call_hook('on_validation_end', *args, **kwargs) def reload_evaluation_dataloaders(self): model = self.trainer.lightning_module if self.trainer.testing: self.trainer.reset_test_dataloader(model) else: self.trainer.reset_val_dataloader(model) def setup(self, model, max_batches, dataloaders): # bookkeeping self.outputs = [] self.predictions = PredictionCollection(self.trainer.global_rank, self.trainer.world_size) # convert max_batches to list if isinstance(max_batches, int): max_batches = [max_batches] * len(dataloaders) self.max_batches = max_batches self.num_dataloaders = self._get_num_dataloaders(dataloaders) self._predictions = [[] for _ in range(self.num_dataloaders)] def on_evaluation_epoch_start(self, *args, **kwargs): if self.trainer.testing: self.trainer.call_hook('on_test_epoch_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_epoch_start', *args, **kwargs) def _build_args(self, batch, batch_idx, dataloader_idx): # make dataloader_idx arg in validation_step optional args = [batch, batch_idx] multiple_val_loaders = ( not self.trainer.testing and self._get_num_dataloaders(self.trainer.val_dataloaders) > 1 ) multiple_test_loaders = (self.trainer.testing and self._get_num_dataloaders(self.trainer.test_dataloaders) > 1) if multiple_test_loaders or multiple_val_loaders: args.append(dataloader_idx) return args def _get_num_dataloaders(self, 
dataloaders): # case where user does: # return dl1, dl2 length = len(dataloaders) if len(dataloaders) > 0 and isinstance(dataloaders[0], (list, tuple)): length = len(dataloaders[0]) return length def evaluation_step(self, batch, batch_idx, dataloader_idx): # configure args args = self._build_args(batch, batch_idx, dataloader_idx) model_ref = self.trainer.lightning_module model_ref._results = Result() if self.testing: model_ref._current_fx_name = "test_step" with self.trainer.profiler.profile("test_step"): output = self.trainer.accelerator.test_step(args) else: model_ref._current_fx_name = "validation_step" with self.trainer.profiler.profile("validation_step"): output = self.trainer.accelerator.validation_step(args) # capture any logged information self.trainer.logger_connector.cache_logged_metrics() # track batch size for weighted average is_result_obj = isinstance(output, Result) if is_result_obj: output.track_batch_size(batch) return output def evaluation_step_end(self, *args, **kwargs): if self.trainer.testing: output = self.trainer.call_hook('test_step_end', *args, **kwargs) else: output = self.trainer.call_hook('validation_step_end', *args, **kwargs) return output def evaluation_epoch_end(self): # unset dataloder_idx in model self.trainer.logger_connector.evaluation_epoch_end(self.trainer.testing) # call the model epoch end deprecated_results = self.__run_eval_epoch_end(self.num_dataloaders) # enable returning anything for i, r in enumerate(deprecated_results): if not isinstance(r, (dict, Result, torch.Tensor)): deprecated_results[i] = [] return deprecated_results def log_epoch_metrics_on_evaluation_end(self): # get the final loop results eval_loop_results = self.trainer.logger_connector.get_evaluate_epoch_results() return eval_loop_results def __run_eval_epoch_end(self, num_dataloaders): model = self.trainer.lightning_module # with a single dataloader don't pass an array outputs = self.outputs eval_results = outputs if num_dataloaders == 1: eval_results = outputs[0] user_reduced = False if self.trainer.testing: if is_overridden('test_epoch_end', model=model): model._current_fx_name = 'test_epoch_end' eval_results = model.test_epoch_end(eval_results) user_reduced = True else: if is_overridden('validation_epoch_end', model=model): model._current_fx_name = 'validation_epoch_end' eval_results = model.validation_epoch_end(eval_results) user_reduced = True # capture logging self.trainer.logger_connector.cache_logged_metrics() # depre warning if eval_results is not None and user_reduced: step = 'testing_epoch_end' if self.trainer.testing else 'validation_epoch_end' self.warning_cache.warn( f'The {step} should not return anything as of 9.1.' ' To log, use self.log(...) or self.write(...) 
directly in the LightningModule' ) if not isinstance(eval_results, list): eval_results = [eval_results] # track depreceated metrics self.trainer.logger_connector.track_metrics_deprecated(eval_results) return eval_results def __gather_epoch_end_eval_results(self, outputs): eval_results = [] for epoch_output in outputs: result = epoch_output[0].__class__.gather(epoch_output) if 'checkpoint_on' in result: result.checkpoint_on = result.checkpoint_on.mean() if 'early_stop_on' in result: result.early_stop_on = result.early_stop_on.mean() eval_results.append(result) # with 1 dataloader don't pass in a list if len(eval_results) == 1: eval_results = eval_results[0] return eval_results def __auto_reduce_result_objs(self, outputs): # outputs has a list of results per dataloader eval_results = [] for dl_output in outputs: result = dl_output[0] result = result.__class__.reduce_on_epoch_end(dl_output) if 'checkpoint_on' in result: result.checkpoint_on = result.checkpoint_on.mean() if 'early_stop_on' in result: result.early_stop_on = result.early_stop_on.mean() eval_results.append(result) return eval_results def on_predict_epoch_end(self): self.trainer._progress_bar_callback.on_test_end(self.trainer, self.trainer.lightning_module) results = self._predictions def _convert_to_numpy(v): return v.cpu().numpy() results = apply_to_collection(results, torch.Tensor, _convert_to_numpy) return results, None def on_evaluation_batch_start(self, batch, batch_idx, dataloader_idx): # set dataloader_idx to model and track batch_size self.trainer.logger_connector.on_evaluation_batch_start( self.trainer.testing, batch, dataloader_idx, self.num_dataloaders ) if self.trainer.testing: self.trainer.call_hook('on_test_batch_start', batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_start', batch, batch_idx, dataloader_idx) def on_evaluation_batch_end(self, output, batch, batch_idx, dataloader_idx): if self.trainer.testing: self.trainer.call_hook('on_test_batch_end', output, batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_end', output, batch, batch_idx, dataloader_idx) # store predicitons if do_write_predictions and track eval loss history self.store_predictions(output, batch_idx, dataloader_idx) def store_predictions(self, output, batch_idx, dataloader_idx): # Add step predictions to prediction collection to write later if output is not None: do_write_predictions = isinstance(output, Result) and self.trainer.testing if do_write_predictions: self.predictions.add(output.pop('predictions', None)) # track debug metrics self.trainer.dev_debugger.track_eval_loss_history(batch_idx, dataloader_idx, output) def on_evaluation_epoch_end(self, *args, **kwargs): # call the callback hook if self.trainer.testing: self.trainer.call_hook('on_test_epoch_end', *args, **kwargs) else: self.trainer.call_hook('on_validation_epoch_end', *args, **kwargs) self.trainer.call_hook('on_epoch_end') def log_evaluation_step_metrics(self, output, batch_idx): if self.trainer.running_sanity_check: return step_log_metrics = {} step_pbar_metrics = {} self.__log_result_step_metrics(step_log_metrics, step_pbar_metrics, batch_idx) def __log_result_step_metrics(self, step_log_metrics, step_pbar_metrics, batch_idx): cached_results = self.trainer.logger_connector.cached_results cached_batch_pbar_metrics, cached_batch_log_metrics = cached_results.update_logger_connector() step_log_metrics.update(cached_batch_log_metrics) step_pbar_metrics.update(cached_batch_pbar_metrics) if len(step_log_metrics) > 0: # 
make the metrics appear as a different line in the same graph metrics_by_epoch = {} for k, v in step_log_metrics.items(): metrics_by_epoch[f'{k}/epoch_{self.trainer.current_epoch}'] = v self.trainer.logger_connector.log_metrics(metrics_by_epoch, {}, step=batch_idx) if len(step_pbar_metrics) > 0: self.trainer.logger_connector.add_progress_bar_metrics(step_pbar_metrics)
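# --- Illustrative sketch (not part of the original source) ---
# The logging above suffixes each step-level metric with `/epoch_{n}` so that every
# epoch shows up as its own line in the same logger graph. A minimal, hedged example
# of that renaming; `_suffix_metrics_with_epoch` is a hypothetical helper and the
# metric names/values are made up.
def _suffix_metrics_with_epoch(step_log_metrics: dict, current_epoch: int) -> dict:
    # {"val_loss": 0.31} with current_epoch=2 -> {"val_loss/epoch_2": 0.31}
    return {f"{k}/epoch_{current_epoch}": v for k, v in step_log_metrics.items()}

assert _suffix_metrics_with_epoch({"val_loss": 0.31}, 2) == {"val_loss/epoch_2": 0.31}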
class TrainLoop: def __init__( self, trainer, max_epochs: Optional[int], min_epochs: Optional[int], max_steps: Optional[int], min_steps: Optional[int], num_sanity_val_steps: int, ): self.trainer = trainer self.accumulated_loss = None self.warning_cache = WarningCache() self._teardown_already_run = False self.running_loss = TensorRunningAccum(window_length=20) self._skip_backward = False self._optimizer_freq_cumsum = None self._hiddens = None self.global_step = 0 self.current_epoch = 0 self.trainer.should_stop = False # the total batch index across all epochs self.total_batch_idx = 0 # the current batch index in the loop that runs over the dataloader(s) self.batch_idx = 0 # the current split index when the batch gets split into chunks in truncated backprop through time self.split_idx = None self.trainer.num_training_batches = 0 self.trainer.train_dataloader = None # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000 self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs # If neither min_epochs or min_steps is set, then use existing default of min_epochs = 1 self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs self.max_steps = max_steps self.min_steps = min_steps if num_sanity_val_steps == -1: self.trainer.num_sanity_val_steps = float("inf") else: self.trainer.num_sanity_val_steps = num_sanity_val_steps @property def num_active_optimizers(self) -> int: return len(self.get_active_optimizers()) @property def optimizer_freq_cumsum(self): if self._optimizer_freq_cumsum is None: self._optimizer_freq_cumsum = np.cumsum( self.trainer.optimizer_frequencies) return self._optimizer_freq_cumsum def should_skip_training(self) -> bool: should_by_max_steps = self.max_steps is not None and self.global_step >= self.max_steps should_by_epoch = self.max_epochs is not None and self.current_epoch >= self.max_epochs return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0 def on_train_start(self): # hook self.trainer.call_hook("on_train_start") def on_train_end(self): if self._teardown_already_run: return self._teardown_already_run = True # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates # when a checkpoint was saved at the last step self.global_step -= 1 self.check_checkpoint_callback(should_update=True, is_last=True) self.global_step += 1 # hook self.trainer.call_hook("on_train_end") # todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers. 
# It might be related to xla tensors blocked when moving the cpu # kill loggers if self.trainer.logger is not None: self.trainer.logger.finalize("success") # summarize profile results self.trainer.profiler.describe() # give accelerators a chance to finish self.trainer.accelerator.on_train_end() # reset bookkeeping self.trainer.state.stage = None def check_checkpoint_callback(self, should_update, is_last=False): # TODO bake this logic into the ModelCheckpoint callback if should_update and self.trainer.checkpoint_connector.has_trained: callbacks = self.trainer.checkpoint_callbacks if is_last and any(cb.save_last and cb.verbose for cb in callbacks): rank_zero_info("Saving latest checkpoint...") model = self.trainer.lightning_module for cb in callbacks: cb.on_validation_end(self.trainer, model) def on_train_epoch_start(self, epoch): # update training progress in trainer self.current_epoch = epoch model = self.trainer.lightning_module # reset train dataloader if epoch != 0 and self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_train_dataloader(model) # todo: specify the possible exception with suppress(Exception): # set seed for distributed sampler (enables shuffling for each epoch) self.trainer.train_dataloader.sampler.set_epoch(epoch) # changing gradient according accumulation_scheduler self.trainer.accumulation_scheduler.on_train_epoch_start( self.trainer, self.trainer.lightning_module) # stores accumulated grad fractions per batch self.accumulated_loss = TensorRunningAccum( window_length=self.trainer.accumulate_grad_batches) # hook self.trainer.call_hook("on_epoch_start") self.trainer.call_hook("on_train_epoch_start") def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx): batch_end_outputs = [ opt_idx_out for opt_idx_out in batch_end_outputs if len(opt_idx_out) ] processed_batch_end_outputs = TrainLoop._prepare_outputs( batch_end_outputs, batch_mode=True) # hook self.trainer.call_hook('on_train_batch_end', processed_batch_end_outputs, batch, batch_idx, dataloader_idx) self.trainer.call_hook('on_batch_end') # figure out what to track for epoch end self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs) # reset batch logger internals self.trainer.logger_connector.on_train_batch_end() def reset_train_val_dataloaders(self, model) -> None: """ Resets train and val dataloaders if none are attached to the trainer. The val dataloader must be initialized before training loop starts, as the training loop inspects the val dataloader to determine whether to run the evaluation loop. 
""" if self.trainer.train_dataloader is None: self.trainer.reset_train_dataloader(model) if self.trainer.val_dataloaders is None: self.trainer.reset_val_dataloader(model) def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs): hook_overridden = self._should_add_batch_output_to_epoch_output() # track the outputs to reduce at the end of the epoch for opt_idx, opt_outputs in enumerate(batch_end_outputs): sample_output = opt_outputs[-1] # decide if we need to reduce at the end of the epoch automatically auto_reduce_tng_result = isinstance( sample_output, Result) and sample_output.should_reduce_on_epoch_end # only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end if not (hook_overridden or auto_reduce_tng_result): continue # with 1 step (no tbptt) don't use a sequence at epoch end if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance( opt_outputs[0], Result): opt_outputs = opt_outputs[0] epoch_output[opt_idx].append(opt_outputs) def _should_add_batch_output_to_epoch_output(self) -> bool: # We add to the epoch outputs if # 1. The model defines training_epoch_end OR # 2. The model overrides on_train_epoch_end which has `outputs` in the signature # TODO: in v1.5 this only needs to check if training_epoch_end is overridden lightning_module = self.trainer.lightning_module if is_overridden("training_epoch_end", model=lightning_module): return True if is_overridden("on_train_epoch_end", model=lightning_module): model_hook_fx = getattr(lightning_module, "on_train_epoch_end") if is_param_in_hook_signature(model_hook_fx, "outputs"): return True return False def get_active_optimizers( self, batch_idx: Optional[int] = None) -> List[Tuple[int, Optimizer]]: """ Returns the currently active optimizers. When multiple optimizers are used with different frequencies, only one of the optimizers is active at a time. Returns: A list of tuples (opt_idx, optimizer) of currently active optimizers. """ if not self.trainer.optimizer_frequencies: # call training_step once per optimizer return list(enumerate(self.trainer.optimizers)) batch_idx = self.total_batch_idx if batch_idx is None else batch_idx optimizers_loop_length = self.optimizer_freq_cumsum[-1] current_place_in_loop = batch_idx % optimizers_loop_length # find optimzier index by looking for the first {item > current_place} in the cumsum list opt_idx = int( np.argmax(self.optimizer_freq_cumsum > current_place_in_loop)) return [(opt_idx, self.trainer.optimizers[opt_idx])] def on_after_backward(self, training_step_output, batch_idx, untouched_loss): training_step_output.detach() # insert after step hook self.trainer.call_hook("on_after_backward") # when in dev debugging track the losses self.trainer.dev_debugger.track_train_loss_history( batch_idx, untouched_loss.detach()) def _check_training_step_output(self, training_step_output): if isinstance( training_step_output, torch.Tensor ) and not self.trainer.lightning_module.automatic_optimization: if training_step_output.grad_fn is None: # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ... 
raise MisconfigurationException( "In manual optimization, `training_step` should not return a Tensor" ) def training_step(self, split_batch, batch_idx, opt_idx, hiddens): # give the PL module a result for logging model_ref = self.trainer.lightning_module with self.trainer.profiler.profile("model_forward"): step_kwargs = self._build_kwargs(split_batch, batch_idx, opt_idx, hiddens) # manually capture logged metrics model_ref._current_fx_name = 'training_step' model_ref._results = Result() with self.trainer.profiler.profile("training_step"): training_step_output = self.trainer.accelerator.training_step( step_kwargs) self.trainer.accelerator.post_training_step() self.trainer.logger_connector.cache_logged_metrics() self._check_training_step_output(training_step_output) training_step_output = self.trainer.call_hook( "training_step_end", training_step_output) training_step_output_for_epoch_end, training_step_output = self._process_training_step_output( training_step_output, split_batch) if training_step_output_for_epoch_end is None: return # enable empty loss when using manual opt closure_loss = None untouched_loss = None if self.trainer.lightning_module.automatic_optimization: # accumulate loss. if accumulate_grad_batches==1, no effect closure_loss = training_step_output.minimize / self.trainer.accumulate_grad_batches # the loss will get scaled for amp. avoid any modifications to it untouched_loss = closure_loss.detach().clone() # result result = AttributeDict( closure_loss=closure_loss, loss=untouched_loss, training_step_output=training_step_output, training_step_output_for_epoch_end= training_step_output_for_epoch_end, ) return result def _process_training_step_output(self, training_step_output, split_batch): training_step_output_for_epoch_end = training_step_output # enable validation_step return None if training_step_output_for_epoch_end is None: return None, None result = self.trainer.lightning_module._results loss = None hiddens = None result["extra"] = {} # handle dict return if isinstance(training_step_output, dict): loss = training_step_output.pop("loss", None) hiddens = training_step_output.pop("hiddens", None) if hiddens is not None: hiddens = hiddens.detach() result["extra"] = training_step_output # handle scalar return elif isinstance(training_step_output, torch.Tensor): loss = training_step_output # map to results under the hood result.minimize = loss self._hiddens = hiddens # track batch for manual reduction with result result.track_batch_size(len(split_batch)) # track metrics without grads for epoch reduction training_step_output_for_epoch_end = copy(result) training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach( ) if self.trainer.move_metrics_to_cpu: training_step_output_for_epoch_end = training_step_output_for_epoch_end.cpu( ) return training_step_output_for_epoch_end, result @staticmethod def _prepare_outputs( outputs: List[List[List[Result]]], batch_mode: bool, ) -> Union[List[List[List[Dict]]], List[List[Dict]], List[Dict], Dict]: """ Extract required information from batch or epoch end results. Args: outputs: A 3-dimensional list of ``Result`` objects with dimensions: [optimizer outs][batch outs][tbptt steps]. batch_mode: If True, ignore the batch output dimension. Returns: The cleaned outputs with ``Result`` objects converted to dictionaries. All list dimensions of size one will be collapsed. 
""" processed_outputs = [] for opt_outputs in outputs: # handle an edge case where an optimizer output is the empty list if len(opt_outputs) == 0: continue processed_batch_outputs = [] if batch_mode: opt_outputs = [opt_outputs] for batch_outputs in opt_outputs: processed_tbptt_outputs = [] for tbptt_output in batch_outputs: out = tbptt_output.extra out['loss'] = tbptt_output.minimize processed_tbptt_outputs.append(out) # if there was only one tbptt step then we can collapse that dimension if len(processed_tbptt_outputs) == 1: processed_tbptt_outputs = processed_tbptt_outputs[0] processed_batch_outputs.append(processed_tbptt_outputs) # batch_outputs should be just one dict (or a list of dicts if using tbptt) per optimizer if batch_mode: processed_batch_outputs = processed_batch_outputs[0] processed_outputs.append(processed_batch_outputs) # if there is only one optimiser then we collapse that dimension if len(processed_outputs) == 1: processed_outputs = processed_outputs[0] return processed_outputs def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure): model_ref = self.trainer.lightning_module is_lbfgs = isinstance(optimizer, torch.optim.LBFGS) using_native_amp = self.trainer.amp_backend == AMPType.NATIVE # native amp + lbfgs is a no go right now if using_native_amp and is_lbfgs: raise MisconfigurationException( 'native PyTorch amp and lbfgs are not compatible.' ' To request, please file a Github issue in PyTorch and tag @mcarilli' ) # wraps into LightningOptimizer only for running step optimizer = LightningOptimizer._to_lightning_optimizer( optimizer, self.trainer, opt_idx) # model hook model_ref.optimizer_step( self.trainer.current_epoch, batch_idx, optimizer, opt_idx, train_step_and_backward_closure, on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE, using_native_amp=using_native_amp, using_lbfgs=is_lbfgs, ) def on_before_zero_grad(self, optimizer): self.trainer.call_hook('on_before_zero_grad', optimizer) def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx): self.trainer.accelerator.optimizer_zero_grad( self.trainer.current_epoch, batch_idx, optimizer, opt_idx) def track_and_norm_grad(self, optimizer) -> dict: # track gradient norms grad_norm_dict = self._track_gradient_norm() # clip gradients self.trainer.accelerator.clip_gradients( optimizer, self.trainer.gradient_clip_val, gradient_clip_algorithm=self.trainer.gradient_clip_algorithm) return grad_norm_dict def _track_gradient_norm(self): grad_norm_dict = {} if (self.global_step + 1) % self.trainer.log_every_n_steps == 0: if float(self.trainer.track_grad_norm) > 0: model = self.trainer.lightning_module grad_norm_dict = grad_norm(model, self.trainer.track_grad_norm) return grad_norm_dict def _tbptt_split_batch(self, batch: Any) -> List[Any]: splits = [batch] truncated_bptt_enabled = self._truncated_bptt_enabled() if truncated_bptt_enabled: model_ref = self.trainer.lightning_module with self.trainer.profiler.profile("tbptt_split_batch"): splits = model_ref.tbptt_split_batch( batch, self._truncated_bptt_steps()) return splits def run_training_epoch(self): # modify dataloader if needed (ddp, etc...) 
train_dataloader = self.trainer.accelerator.process_dataloader( self.trainer.train_dataloader) # track epoch output epoch_output = [[] for _ in range(self.num_active_optimizers)] train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader( train_dataloader) dataloader_idx = 0 batch_idx = None is_last_batch = None for batch_idx, (batch, is_last_batch) in train_dataloader: self.batch_idx = batch_idx # ------------------------------------ # TRAINING_STEP + TRAINING_STEP_END # ------------------------------------ with self.trainer.profiler.profile("run_training_batch"): batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx) # when returning -1 from train_step, we end epoch early if batch_output.signal == -1: break # hook # TODO: add outputs to batches self.on_train_batch_end( epoch_output, batch_output.training_step_output_for_epoch_end, batch, batch_idx, dataloader_idx, ) # ----------------------------------------- # SAVE METRICS TO LOGGERS # ----------------------------------------- self.trainer.logger_connector.log_train_step_metrics(batch_output) # ----------------------------------------- # VALIDATE IF NEEDED # ----------------------------------------- should_check_val = self._should_check_val_fx( batch_idx, is_last_batch) if should_check_val: self.trainer.validating = True self.trainer._run_evaluation() self.trainer.training = True # ----------------------------------------- # SAVE LOGGERS (ie: Tensorboard, etc...) # ----------------------------------------- self.save_loggers_on_train_batch_end() # update LR schedulers monitor_metrics = deepcopy( self.trainer.logger_connector.callback_metrics) self.update_train_loop_lr_schedulers( monitor_metrics=monitor_metrics) self.trainer.checkpoint_connector.has_trained = True # max steps reached, end training if (self.max_steps is not None and self.max_steps <= self.global_step + 1 and self._accumulated_batches_reached()): break # end epoch early # stop when the flag is changed or we've gone past the amount # requested in the batches if self.trainer.should_stop: break self.total_batch_idx += 1 # stop epoch if we limited the number of training batches if self._num_training_batches_reached(is_last_batch): break # progress global step according to grads progress self.increment_accumulated_grad_global_step() if batch_idx is None: # dataloader/iterator did not produce a batch return # handle epoch_output on epoch end self.on_train_epoch_end(epoch_output) # log epoch metrics self.trainer.logger_connector.log_train_epoch_end_metrics(epoch_output) should_check_val = self._should_check_val_fx(batch_idx, is_last_batch, on_epoch=True) should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation( self.trainer.num_val_batches) should_train_only = self.trainer.disable_validation or should_skip_eval # update epoch level lr_schedulers if no val loop outside train loop is triggered if not should_check_val or should_train_only: self.trainer.optimizer_connector.update_learning_rates( interval='epoch') if should_train_only: self.check_checkpoint_callback(True) if should_check_val: self.trainer.validating = True self.trainer._run_evaluation(on_epoch=True) self.trainer.training = True # increment the global step once # progress global step according to grads progress self.increment_accumulated_grad_global_step() def on_train_epoch_end(self, epoch_output: List[List[List[Result]]]) -> None: # inform logger the batch loop has finished self.trainer.logger_connector.on_train_epoch_end() # prepare epoch output 
processed_epoch_output = TrainLoop._prepare_outputs(epoch_output, batch_mode=False) # get the model and call model.training_epoch_end model = self.trainer.lightning_module if is_overridden('training_epoch_end', model=model): # run training_epoch_end # refresh the result for custom logging at the epoch level model._current_fx_name = 'training_epoch_end' training_epoch_end_output = model.training_epoch_end( processed_epoch_output) if training_epoch_end_output is not None: raise MisconfigurationException( 'training_epoch_end expects a return of None. ' 'HINT: remove the return statement in training_epoch_end') # capture logging self.trainer.logger_connector.cache_logged_metrics() # call train epoch end hooks self._on_train_epoch_end_hook(processed_epoch_output) self.trainer.call_hook('on_epoch_end') def _on_train_epoch_end_hook(self, processed_epoch_output) -> None: # We cannot rely on Trainer.call_hook because the signatures might be different across # lightning module and callback # As a result, we need to inspect if the module accepts `outputs` in `on_train_epoch_end` # This implementation is copied from Trainer.call_hook hook_name = "on_train_epoch_end" # set hook_name to model + reset Result obj skip = self.trainer._reset_result_and_set_fx_name(hook_name) # always profile hooks with self.trainer.profiler.profile(hook_name): # first call trainer hook if hasattr(self.trainer, hook_name): trainer_hook = getattr(self.trainer, hook_name) trainer_hook(processed_epoch_output) # next call hook in lightningModule model_ref = self.trainer.lightning_module if is_overridden(hook_name, model_ref): hook_fx = getattr(model_ref, hook_name) if is_param_in_hook_signature(hook_fx, "outputs"): self.warning_cache.warn( "The signature of `ModelHooks.on_train_epoch_end` has changed in v1.3." " `outputs` parameter has been deprecated." " Support for the old signature will be removed in v1.5", DeprecationWarning) model_ref.on_train_epoch_end(processed_epoch_output) else: model_ref.on_train_epoch_end() # if the PL module doesn't have the hook then call the accelerator # used to auto-reduce things for the user with Results obj elif hasattr(self.trainer.accelerator, hook_name): accelerator_hook = getattr(self.trainer.accelerator, hook_name) accelerator_hook() if not skip: self.trainer._cache_logged_metrics() def run_training_batch(self, batch, batch_idx, dataloader_idx): # track grad norms grad_norm_dict = {} # bookkeeping self._hiddens = None optimizers = list(enumerate(self.trainer.optimizers)) # track all outputs across time and num of optimizers batch_outputs = [[] for _ in range(len(optimizers))] if batch is None: self.warning_cache.warn( "train_dataloader yielded None. If this was on purpose, ignore this warning..." 
) return AttributeDict( signal=0, grad_norm_dict={}, training_step_output_for_epoch_end=batch_outputs, ) # hook response = self.trainer.call_hook("on_batch_start") if response == -1: return AttributeDict(signal=-1, grad_norm_dict={}) # hook response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx) if response == -1: return AttributeDict(signal=-1, grad_norm_dict={}) # lightning module hook splits = self._tbptt_split_batch(batch) for split_idx, split_batch in enumerate(splits): self.split_idx = split_idx if self.trainer.lightning_module.automatic_optimization: for opt_idx, optimizer in self.get_active_optimizers( batch_idx): result = self._run_optimization(batch_idx, split_idx, split_batch, opt_idx, optimizer) if result: batch_outputs[opt_idx].append( result.training_step_output_for_epoch_end) grad_norm_dict = result.get("grad_norm_dict", {}) else: # in manual optimization, there is no looping over optimizers result = self._run_optimization(batch_idx, split_idx, split_batch) if result: batch_outputs[0].append( result.training_step_output_for_epoch_end) output = AttributeDict( signal=0, # todo: Properly aggregate grad_norm across opt_idx and split_idx grad_norm_dict=grad_norm_dict, training_step_output_for_epoch_end=batch_outputs, ) return output def _run_optimization(self, batch_idx, split_idx, split_batch, opt_idx=0, optimizer=None): # TODO: In v1.5, when optimizer_idx gets removed from training_step in manual_optimization, change # opt_idx=0 to opt_idx=None in the signature here # toggle model params + set info to logger_connector self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer) result = AttributeDict() closure = self.make_closure(split_batch, batch_idx, opt_idx, optimizer, self._hiddens, result) if self.should_accumulate(): # For gradient accumulation # ------------------- # calculate loss (train step + train step end) # ------------------- # automatic_optimization=True: perform ddp sync only when performing optimizer_step # automatic_optimization=False: don't block synchronization here with self.block_ddp_sync_behaviour(): closure() # ------------------------------ # BACKWARD PASS # ------------------------------ # gradient update with accumulated gradients else: if self.trainer.lightning_module.automatic_optimization: self.optimizer_step(optimizer, opt_idx, batch_idx, closure) else: result = self.training_step(split_batch, batch_idx, opt_idx, self._hiddens) if not result: # user decided to skip optimization return result # update running loss + reset accumulated loss self.update_running_loss(result.loss) self._process_closure_result(result) return result def training_step_and_backward_closure( self, split_batch: Any, batch_idx: int, opt_idx: int, optimizer: Optimizer, hiddens, return_result: AttributeDict, ) -> Optional[torch.Tensor]: step_result = self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens) if step_result is not None: return_result.update(step_result) return return_result.loss def make_closure(self, *closure_args, **closure_kwargs: Any) -> Callable: """ Wraps the training step closure into a partial object which will be called within ``optimizer.step``.
""" partial_func = partial(self.training_step_and_backward_closure, *closure_args, **closure_kwargs) return update_wrapper(partial_func, self.training_step_and_backward_closure) @contextmanager def block_ddp_sync_behaviour(self, should_block_sync: bool = False): """ automatic_optimization = True Blocks ddp sync gradients behaviour on backwards pass. This is useful for skipping sync when accumulating gradients, reducing communication overhead automatic_optimization = False do not block ddp gradient sync when using manual optimization as gradients are needed within the training step Returns: context manager with sync behaviour off """ if (isinstance(self.trainer.training_type_plugin, ParallelPlugin) and (self.trainer.lightning_module.automatic_optimization or should_block_sync)): with self.trainer.training_type_plugin.block_backward_sync(): yield None else: yield None def _process_closure_result( self, opt_closure_result: Optional[AttributeDict]) -> None: if not opt_closure_result: return # cache metrics self.trainer.logger_connector.cache_training_step_metrics( opt_closure_result) # check if loss or model weights are nan if self.trainer.terminate_on_nan: self._check_finite(opt_closure_result.loss) def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens): """Wrap forward, zero_grad and backward in a closure so second order methods work""" with self.trainer.profiler.profile("training_step_and_backward"): # lightning module hook result = self.training_step(split_batch, batch_idx, opt_idx, hiddens) if not self._skip_backward and self.trainer.lightning_module.automatic_optimization: is_first_batch_to_accumulate = batch_idx % self.trainer.accumulate_grad_batches == 0 if is_first_batch_to_accumulate: self.on_before_zero_grad(optimizer) self.optimizer_zero_grad(batch_idx, optimizer, opt_idx) # backward pass if result is not None: with self.trainer.profiler.profile("backward"): self.backward(result, optimizer, opt_idx) # hook - call this hook only # when gradients have finished to accumulate if not self.should_accumulate(): self.on_after_backward(result.training_step_output, batch_idx, result.loss) # check if loss or model weights are nan if self.trainer.terminate_on_nan: self._check_finite(result.loss) else: self.warning_cache.warn( "training_step returned None. If this was on purpose, ignore this warning..." 
) if len(self.trainer.optimizers) > 1: # revert back to previous state self.trainer.lightning_module.untoggle_optimizer(opt_idx) return result def _check_finite(self, loss: torch.Tensor) -> None: if not torch.isfinite(loss).all(): raise ValueError( f'The loss returned in `training_step` is {loss}.') model = self.trainer.lightning_module detect_nan_parameters(model) def backward(self, result, optimizer, opt_idx, *args, **kwargs): self.trainer.dev_debugger.track_event("backward_call") should_accumulate = self.should_accumulate() # backward can be called manually in the training loop if isinstance(result, torch.Tensor): self.trainer.accelerator.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs) else: result.closure_loss = self.trainer.accelerator.backward( result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs) if not self.should_accumulate(): # track gradients result.grad_norm_dict = self.track_and_norm_grad( optimizer=optimizer) def update_train_loop_lr_schedulers(self, monitor_metrics=None): num_accumulated_batches_reached = self._accumulated_batches_reached() num_training_batches_reached = self._num_training_batches_reached() if num_accumulated_batches_reached or num_training_batches_reached: # update lr self.trainer.optimizer_connector.update_learning_rates( interval="step", monitor_metrics=monitor_metrics, opt_indices=[ opt_idx for opt_idx, _ in self.get_active_optimizers() ], ) def increment_accumulated_grad_global_step(self): num_accumulated_batches_reached = self._accumulated_batches_reached() num_training_batches_reached = self._num_training_batches_reached() # progress global step according to grads progress if num_accumulated_batches_reached or num_training_batches_reached: self.global_step = self.trainer.accelerator.update_global_step( self.total_batch_idx, self.global_step) def _accumulated_batches_reached(self): return (self.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0 def _num_training_batches_reached(self, is_last_batch=False): return (self.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch def should_accumulate(self): # checks if backward or backward + optimizer step (via closure) accumulation_done = self._accumulated_batches_reached() is_final_batch = self._num_training_batches_reached() return not (accumulation_done or is_final_batch) def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool, on_epoch: bool = False) -> bool: """ Decide if we should run validation. 
""" if not self.trainer.enable_validation: return False # check if this epoch is eligible to run validation if (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch != 0: return False # val_check_batch is inf for iterable datasets with no length defined # TODO: let training/eval loop handle logic around limit_*_batches and val_check_batch is_val_check_batch = False if isinstance(self.trainer.limit_train_batches, int) and self.trainer.val_check_batch == float('inf'): is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0 elif self.trainer.val_check_batch != float('inf'): is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0 # Note: num_training_batches is also inf for iterable datasets with no length defined epoch_end_val_check = (batch_idx + 1) % self.trainer.num_training_batches == 0 is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float( "inf") if on_epoch: return ( is_val_check_batch and epoch_end_val_check ) or self.trainer.should_stop or is_last_batch_for_infinite_dataset else: return is_val_check_batch and not epoch_end_val_check def _build_kwargs(self, batch, batch_idx, opt_idx, hiddens): # enable not needing to add opt_idx to training_step step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)]) lightning_module = self.trainer.lightning_module if len(self.trainer.optimizers) > 1: training_step_fx = getattr(lightning_module, "training_step") has_opt_idx_in_train_step = is_param_in_hook_signature( training_step_fx, "optimizer_idx") if has_opt_idx_in_train_step: if not lightning_module.automatic_optimization: self.warning_cache.warn( "`training_step` hook signature has changed in v1.3." " `optimizer_idx` argument has been removed in case of manual optimization. Support for" " the old signature will be removed in v1.5", DeprecationWarning) step_kwargs['optimizer_idx'] = opt_idx elif not has_opt_idx_in_train_step and self.trainer.lightning_module.automatic_optimization: raise ValueError( f"Your LightningModule defines {len(self.trainer.optimizers)} optimizers but" ' `training_step` is missing the `optimizer_idx` argument.' ) # pass hiddens if using tbptt if self._truncated_bptt_enabled(): step_kwargs['hiddens'] = hiddens return step_kwargs def _truncated_bptt_enabled(self) -> bool: """ Temporary tbptt utilities until this flag is fully migrated to the lightning module. """ return self._truncated_bptt_steps() > 0 def _truncated_bptt_steps(self) -> int: lightning_module = self.trainer.lightning_module # Give precedence to the LightningModule as the Trainer flag will be removed in v1.5 if lightning_module.truncated_bptt_steps > 0: return lightning_module.truncated_bptt_steps return self.trainer.truncated_bptt_steps or 0 def save_loggers_on_train_batch_end(self): # when loggers should save to disk should_flush_logs = self.trainer.logger_connector.should_flush_logs if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None: self.trainer.logger.save() def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer): # make sure only the gradients of the current optimizer's parameters are calculated # in the training step to prevent dangling gradients in multiple-optimizer setup. 
if self.trainer.lightning_module.automatic_optimization and len( self.trainer.optimizers) > 1: model = self.trainer.lightning_module model.toggle_optimizer(optimizer, opt_idx) # use to track metrics internally self.trainer.logger_connector.on_train_split_start( split_idx, opt_idx, split_batch) def update_running_loss(self, current_loss: torch.Tensor) -> None: if self.trainer.lightning_module.automatic_optimization: # track total loss for logging (avoid mem leaks) self.accumulated_loss.append(current_loss) accumulated_loss = self.accumulated_loss.mean() if accumulated_loss is not None: # calculate running loss for display self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches) # reset for next set of accumulated grads self.accumulated_loss.reset()
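# --- Illustrative sketch (not part of the original source) ---
# How `TrainLoop.get_active_optimizers` above uses `optimizer_frequencies` and a
# cumulative sum to pick the single active optimizer for a batch. The frequency
# values below are made up; only the selection logic mirrors the method.
import numpy as np

frequencies = [2, 1]                   # optimizer 0 runs for 2 batches, then optimizer 1 for 1 batch
freq_cumsum = np.cumsum(frequencies)   # -> [2, 3]

def active_opt_idx(batch_idx: int) -> int:
    place_in_loop = batch_idx % freq_cumsum[-1]          # position inside one frequency cycle
    return int(np.argmax(freq_cumsum > place_in_loop))   # first cumsum entry strictly greater

assert [active_opt_idx(i) for i in range(6)] == [0, 0, 1, 0, 0, 1]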
running_special = os.getenv("PL_RUNNING_SPECIAL_TESTS", "0") == "1" if running_special: stderr = StringIO() # recording with redirect_stderr(stderr): _warn("test1") _warn("test2", DeprecationWarning) rank_zero_warn("test3") rank_zero_warn("test4", DeprecationWarning) rank_zero_deprecation("test5") cache = WarningCache() cache.warn("test6") cache.deprecation("test7") output = stderr.getvalue() assert "test_warnings.py:30: UserWarning: test1" in output assert "test_warnings.py:31: DeprecationWarning: test2" in output assert "test_warnings.py:33: UserWarning: test3" in output assert "test_warnings.py:34: DeprecationWarning: test4" in output assert "test_warnings.py:36: LightningDeprecationWarning: test5" in output assert "test_warnings.py:39: UserWarning: test6" in output assert "test_warnings.py:40: LightningDeprecationWarning: test7" in output # check that logging is properly configured
class PredictLoop(object): def __init__(self, trainer): self.trainer = trainer self.max_batches = None self.num_dataloaders = None self.warning_cache = WarningCache() def on_trainer_init(self): self.trainer.num_predict_batches = [] def get_predict_dataloaders(self): self.trainer.reset_predict_dataloader(self.trainer.lightning_module) dataloaders = self.trainer.predict_dataloaders max_batches = self.trainer.num_predict_batches return dataloaders, max_batches def should_skip_predict(self, max_batches): return sum(max_batches) == 0 def on_predict_model_eval(self, *_, **__): model_ref = self.trainer.lightning_module model_ref.on_predict_model_eval() def setup(self, model, max_batches, dataloaders): self.trainer.call_hook("on_predict_start") # copy properties for forward overrides self.trainer.model_connector.copy_trainer_model_properties(model) # convert max_batches to list if isinstance(max_batches, int): max_batches = [max_batches] * len(dataloaders) self.max_batches = max_batches self.num_dataloaders = self._get_num_dataloaders(dataloaders) self._predictions = [[] for _ in range(self.num_dataloaders)] self.trainer._progress_bar_callback.on_predict_start( self.trainer, self.trainer.lightning_module) def _get_num_dataloaders(self, dataloaders): # case where user does: # return dl1, dl2 length = len(dataloaders) if len(dataloaders) > 0 and isinstance(dataloaders[0], (list, tuple)): length = len(dataloaders[0]) return length def predict_step(self, batch, batch_idx, dataloader_idx): # configure args args = [batch, batch_idx] if self.num_dataloaders: args.append(dataloader_idx) model_ref = self.trainer.lightning_module model_ref._current_fx_name = "predict" predictions = self.trainer.accelerator.predict_step(args) if predictions is None: self.warning_cache.warn( "predict returned None. If it was on purpose, ignore this warning..." ) self._predictions[dataloader_idx].append(predictions) self.trainer._progress_bar_callback.on_predict_batch_end( self.trainer, model_ref, predictions, batch, batch_idx, dataloader_idx) return def on_predict_epoch_end(self): self.trainer.profiler.describe() self.trainer._progress_bar_callback.on_predict_end( self.trainer, self.trainer.lightning_module) results = self._predictions def _convert_to_numpy(v): return v.cpu().numpy() results = apply_to_collection(results, torch.Tensor, _convert_to_numpy) if len(results) == 1: return results[0] return results def on_predict_start(self): # hook self.trainer.call_hook("on_predict_start") def on_predict_end(self): # hook self.trainer.call_hook("on_predict_end")
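# --- Illustrative sketch (not part of the original source) ---
# `PredictLoop.on_predict_epoch_end` above converts every tensor nested inside the
# collected predictions to numpy via `apply_to_collection`. A small, hedged example
# of that pattern; it assumes the PyTorch Lightning 1.x utility import path and the
# `results` structure is made up.
import torch
from pytorch_lightning.utilities.apply_func import apply_to_collection

results = [[{"logits": torch.randn(2, 3)}], [torch.ones(4)]]
as_numpy = apply_to_collection(results, torch.Tensor, lambda t: t.cpu().numpy())
# the nested list/dict structure is preserved; only the tensors become numpy arrays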
class TrainingEpochLoop(loops.Loop[_OUTPUTS_TYPE]): """Runs over all batches in a dataloader (one epoch). Args: min_steps: The minimum number of steps (batches) to process max_steps: The maximum number of steps (batches) to process """ def __init__(self, min_steps: Optional[int] = 0, max_steps: int = -1) -> None: super().__init__() if max_steps is None: rank_zero_deprecation( "Setting `max_steps = None` is deprecated in v1.5 and will no longer be supported in v1.7." " Use `max_steps = -1` instead." ) max_steps = -1 elif max_steps < -1: raise MisconfigurationException( f"`max_steps` must be a non-negative integer or -1 (infinite steps). You passed in {max_steps}." ) self.min_steps = min_steps self.max_steps = max_steps self.global_step: int = 0 self.batch_progress = BatchProgress() self.scheduler_progress = SchedulerProgress() self.batch_loop: Optional[TrainingBatchLoop] = None self.val_loop: Optional["loops.EvaluationLoop"] = None self._results = ResultCollection(training=True) self._outputs: _OUTPUTS_TYPE = [] self._warning_cache = WarningCache() self._dataloader_iter: Optional[Iterator] = None # caches the loaded dataloader state until dataloader objects are available self._dataloader_state_dict: Dict[str, Any] = {} @property def total_batch_idx(self) -> int: """Returns the current batch index (across epochs)""" # use `ready` instead of `completed` in case this is accessed after `completed` has been increased # but before the next `ready` increase return self.batch_progress.total.ready - 1 @property def batch_idx(self) -> int: """Returns the current batch index (within this epoch)""" # use `ready` instead of `completed` in case this is accessed after `completed` has been increased # but before the next `ready` increase return self.batch_progress.current.ready - 1 @property def _is_training_done(self) -> bool: max_steps_reached = _is_max_limit_reached(self.global_step, self.max_steps) return max_steps_reached or self._num_ready_batches_reached() @property def _is_validation_done(self) -> bool: # when we are restarting we want to check whether the val loop has finished return not self.restarting or self.val_loop.done @property def done(self) -> bool: """Returns whether the training should be stopped. The criteria are that the number of steps reached the max steps, the last batch is reached or the trainer signals to stop (e.g. by early stopping). 
""" return (self._is_training_done and self._is_validation_done) or self.trainer.should_stop def connect( self, batch_loop: TrainingBatchLoop = None, val_loop: Optional["loops.EvaluationLoop"] = None, ) -> None: """Optionally connect a custom batch or validation loop to this training epoch loop.""" if batch_loop is not None: self.batch_loop = batch_loop if val_loop is not None: self.val_loop = val_loop def reset(self) -> None: """Resets the internal state of the loop for a new run.""" assert self.batch_loop is not None assert self.batch_loop.optimizer_loop is not None if self.restarting: self.batch_progress.reset_on_restart() self.scheduler_progress.reset_on_restart() self.batch_loop.optimizer_loop.optim_progress.reset_on_restart() else: self.batch_progress.reset_on_run() self.scheduler_progress.reset_on_run() self.batch_loop.optimizer_loop.optim_progress.reset_on_run() self._outputs = [] def on_run_start(self, data_fetcher: AbstractDataFetcher, **kwargs: Any) -> None: # hook self.trainer.logger_connector.on_epoch_start() self.trainer.call_hook("on_epoch_start") self.trainer.call_hook("on_train_epoch_start") self.trainer.fit_loop.epoch_progress.increment_started() self._reload_dataloader_state_dict(data_fetcher) self._dataloader_iter = _update_dataloader_iter(data_fetcher, self.batch_idx + 1) def advance(self, *args: Any, **kwargs: Any) -> None: """Runs a single training batch. Args: dataloader_iter: the iterator over the dataloader producing the new batch Raises: StopIteration: When the epoch is canceled by the user returning -1 """ if self.restarting and self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch): # skip training and run validation in `on_advance_end` return batch_idx, (batch, self.batch_progress.is_last_batch) = next(self._dataloader_iter) if not self.trainer._data_connector.train_data_fetcher.store_on_device: with self.trainer.profiler.profile("training_batch_to_device"): batch = self.trainer.accelerator.batch_to_device(batch) self.batch_progress.increment_ready() # cache the batch size value to avoid extracting it again after the batch loop runs as the value will be # different if tbptt is enabled batch_size = self.trainer.logger_connector.on_batch_start(batch_idx, batch) if batch is None: self._warning_cache.warn("train_dataloader yielded None. 
If this was on purpose, ignore this warning...") batch_output = [] else: # hook response = self.trainer.call_hook("on_batch_start") if response == -1: self.batch_progress.increment_processed() raise StopIteration # TODO: Update this in v1.7 (deprecation: #9816) model_fx = self.trainer.lightning_module.on_train_batch_start extra_kwargs = ( {"dataloader_idx": 0} if callable(model_fx) and is_param_in_hook_signature(model_fx, "dataloader_idx", explicit=True) else {} ) # hook response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, **extra_kwargs) if response == -1: self.batch_progress.increment_processed() raise StopIteration self.batch_progress.increment_started() with self.trainer.profiler.profile("run_training_batch"): batch_output = self.batch_loop.run(batch, batch_idx) self.trainer._results.batch_size = batch_size self.batch_progress.increment_processed() # update non-plateau LR schedulers # update epoch-interval ones only when we are at the end of training epoch self.update_lr_schedulers("step", update_plateau_schedulers=False) if self._num_ready_batches_reached(): self.update_lr_schedulers("epoch", update_plateau_schedulers=False) batch_end_outputs = self._prepare_outputs_training_batch_end( batch_output, automatic=self.trainer.lightning_module.automatic_optimization, num_optimizers=len(self.trainer.optimizers), ) # TODO: Update this in v1.7 (deprecation: #9816) model_fx = self.trainer.lightning_module.on_train_batch_end extra_kwargs = ( {"dataloader_idx": 0} if callable(model_fx) and is_param_in_hook_signature(model_fx, "dataloader_idx", explicit=True) else {} ) self.trainer.call_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx, **extra_kwargs) self.trainer.call_hook("on_batch_end") self.trainer.logger_connector.on_batch_end() self.batch_progress.increment_completed() if is_overridden("training_epoch_end", self.trainer.lightning_module): self._outputs.append(batch_output) # ----------------------------------------- # SAVE METRICS TO LOGGERS AND PROGRESS_BAR # ----------------------------------------- self.trainer.logger_connector.update_train_step_metrics() def on_advance_end(self): """Runs validation and checkpointing if necessary. Raises: StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch """ # ----------------------------------------- # VALIDATE IF NEEDED + CHECKPOINT CALLBACK # ----------------------------------------- should_check_val = self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch) if should_check_val: self.trainer.validating = True self._run_validation() self.trainer.training = True # ----------------------------------------- # SAVE LOGGERS (ie: Tensorboard, etc...) # ----------------------------------------- self._save_loggers_on_train_batch_end() # update plateau LR scheduler after metrics are logged self.update_lr_schedulers("step", update_plateau_schedulers=True) if not self._should_accumulate(): # progress global step according to grads progress self.global_step += 1 # if training finished, try to exit in `on_run_end` instead as we should have enough time # TODO: @tchaton verify this assumption is True. if not self._is_training_done: # if fault tolerant is enabled and process has been notified, exit. self.trainer._exit_gracefully_on_signal() def on_run_end(self) -> None: """Calls the on_epoch_end hook.
Returns: The output of each training step for each optimizer Raises: MisconfigurationException: ``train_epoch_end`` does not return ``None`` """ # inform logger the batch loop has finished self.trainer.logger_connector.epoch_end_reached() # get the model and call model.training_epoch_end model = self.trainer.lightning_module if is_overridden("training_epoch_end", model) and self._outputs: epoch_end_outputs = self._prepare_outputs_training_epoch_end( self._outputs, automatic=model.automatic_optimization, num_optimizers=len(self.trainer.optimizers), ) # run lightning module hook training_epoch_end # refresh the result for custom logging at the epoch level model._current_fx_name = "training_epoch_end" epoch_end_outputs = model.training_epoch_end(epoch_end_outputs) if epoch_end_outputs is not None: raise MisconfigurationException( "`training_epoch_end` expects a return of None. " "HINT: remove the return statement in `training_epoch_end`." ) # free memory self._outputs = [] self.trainer.fit_loop.epoch_progress.increment_processed() # call train epoch end hooks self.trainer.call_hook("on_train_epoch_end") self.trainer.call_hook("on_epoch_end") self.trainer.logger_connector.on_epoch_end() if self._num_ready_batches_reached(): self.update_lr_schedulers("epoch", update_plateau_schedulers=True) # if fault tolerant is enabled and process has been notified, exit. self.trainer._exit_gracefully_on_signal() def teardown(self) -> None: self._results.cpu() self.batch_loop.teardown() self.val_loop.teardown() def on_save_checkpoint(self) -> Dict: state_dict = super().on_save_checkpoint() if ( self.trainer.train_dataloader is None or self._num_completed_batches_reached() # did not finish # TODO: fault-tolerance requires a minimum number of batches so probably should be > 0 or self.batch_progress.current.ready == 0 # did not start ): return state_dict state_dict["dataloader_state_dict"] = self.trainer.train_dataloader.state_dict( has_completed=self._has_completed() ) return state_dict def on_load_checkpoint(self, state_dict: Dict) -> None: # cache the dataloader state dict until the dataloader objects are available self._dataloader_state_dict = state_dict.get("dataloader_state_dict") def _run_validation(self): # reload dataloaders self.val_loop._reload_evaluation_dataloaders() with torch.no_grad(): self.val_loop.run() def _accumulated_batches_reached(self) -> bool: """Determine if accumulation will be finished by the end of the current batch.""" return self.batch_progress.current.ready % self.trainer.accumulate_grad_batches == 0 def _num_ready_batches_reached(self) -> bool: """Checks if we are in the last batch or if there are more batches to follow.""" epoch_finished_on_ready = self.batch_progress.current.ready == self.trainer.num_training_batches return epoch_finished_on_ready or self.batch_progress.is_last_batch def _num_completed_batches_reached(self) -> bool: epoch_finished_on_completed = self.batch_progress.current.completed == self.trainer.num_training_batches dataloader_consumed_successfully = self.batch_progress.is_last_batch and self._has_completed() return epoch_finished_on_completed or dataloader_consumed_successfully def _has_completed(self) -> bool: return self.batch_progress.current.ready == self.batch_progress.current.completed def _should_accumulate(self) -> bool: """Checks if the optimizer step should be performed or gradients should be accumulated for the current step.""" accumulation_done = self._accumulated_batches_reached() # Lightning steps on the final batch is_final_batch = 
self._num_ready_batches_reached() # but the TTP might not ttp_accumulates_on_final_batch = ( self.trainer.training_type_plugin.handles_gradient_accumulation or not is_final_batch ) return not accumulation_done and ttp_accumulates_on_final_batch @staticmethod def _prepare_outputs_training_batch_end( batch_output: _BATCH_OUTPUTS_TYPE, automatic: bool, num_optimizers: int, ) -> Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]: """Processes the outputs from the batch loop into the format passed to the ``training_batch_end`` hook. ``(tbptt_steps, n_opt) -> (n_opt, tbptt_steps)``. The optimizer dimension might have been squeezed. """ if not batch_output: return [] # convert optimizer dicts to list if automatic: batch_output = apply_to_collection( batch_output, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers ) array = np.array(batch_output, dtype=object) if array.ndim == 1: array = np.expand_dims(array, 1) array = array.transpose((1, 0)) array = array.squeeze() array = array.tolist() array = _recursive_unpad(array) return array @staticmethod def _prepare_outputs_training_epoch_end( batch_outputs: _OUTPUTS_TYPE, automatic: bool, num_optimizers: int, ) -> Union[List[List[List[Dict[str, Any]]]], List[List[Dict[str, Any]]], List[Dict[str, Any]]]: """Processes the outputs from the batch loop into the format passed to the ``training_epoch_end`` hook. ``(n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps)``. All single-element dimensions might have been squeezed. This processing is necessary because the format of the inputs to the ``training_epoch_end`` hook does not match the loop structure and because empty dimensions are squeezed. This could break with loop customization. """ # `batch_outputs` (plural) is the same as `epoch_end_output` (singular) if not batch_outputs: return [] # convert optimizer dicts to list if automatic: batch_outputs = apply_to_collection( batch_outputs, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers ) array = _recursive_pad(batch_outputs) if array.ndim == 2: array = np.expand_dims(array, 2) array = array.transpose((2, 0, 1)) array = array.squeeze() array = array.tolist() array = _recursive_unpad(array) # in case we squeezed from 1-element array to a 0-dim array array = array if isinstance(array, list) else [array] # remove residual empty lists array = [item for item in array if not isinstance(item, list) or len(item)] return array def update_lr_schedulers(self, interval: str, update_plateau_schedulers: bool) -> None: """updates the lr schedulers based on the given interval.""" if interval == "step" and self._should_accumulate(): return active_optimizers = _get_active_optimizers( self.trainer.optimizers, self.trainer.optimizer_frequencies, self.total_batch_idx ) self._update_learning_rates( interval=interval, update_plateau_schedulers=update_plateau_schedulers, opt_indices=[opt_idx for opt_idx, _ in active_optimizers], ) def _update_learning_rates( self, interval: str, update_plateau_schedulers: bool, opt_indices: Optional[List[int]] = None ) -> None: """Update learning rates. Args: interval: either 'epoch' or 'step'. update_plateau_schedulers: control whether ``ReduceLROnPlateau`` or non-plateau schedulers get updated. This is used so non-plateau schedulers can be updated before running validation. Checkpoints are commonly saved during validation, however, on-plateau schedulers might monitor a validation metric so they have to be updated separately. opt_indices: indices of the optimizers to update. 
""" if not self.trainer.lr_schedulers or not self.trainer.lightning_module.automatic_optimization: return if opt_indices is None: opt_indices = [] for lr_scheduler in self.trainer.lr_schedulers: if isinstance(lr_scheduler["opt_idx"], int) and lr_scheduler["opt_idx"] not in opt_indices: continue if update_plateau_schedulers ^ lr_scheduler["reduce_on_plateau"]: continue current_idx = self.batch_idx if interval == "step" else self.trainer.current_epoch current_idx += 1 # account for both batch and epoch starts from 0 # Take step if call to update_learning_rates matches the interval key and # the current step modulo the schedulers frequency is zero if lr_scheduler["interval"] == interval and current_idx % lr_scheduler["frequency"] == 0: monitor_val = None if lr_scheduler["reduce_on_plateau"]: # If instance of ReduceLROnPlateau, we need a monitor monitor_key = lr_scheduler["monitor"] monitor_val = self._get_monitor_value(monitor_key) if monitor_val is None: if lr_scheduler.get("strict", True): avail_metrics = list(self.trainer.callback_metrics) raise MisconfigurationException( f"ReduceLROnPlateau conditioned on metric {monitor_key}" f" which is not available. Available metrics are: {avail_metrics}." " Condition can be set using `monitor` key in lr scheduler dict" ) rank_zero_warn( f"ReduceLROnPlateau conditioned on metric {monitor_key}" " which is not available but strict is set to `False`." " Skipping learning rate update.", RuntimeWarning, ) continue self.scheduler_progress.increment_ready() # update LR if lr_scheduler["reduce_on_plateau"]: lr_scheduler["scheduler"].step(monitor_val) else: lr_scheduler["scheduler"].step() self.scheduler_progress.increment_completed() def _get_monitor_value(self, key: str) -> Any: # this is a separate method to aid in testing return self.trainer.callback_metrics.get(key) def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool: """Decide if we should run validation.""" if not self.trainer.enable_validation: return False is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0 if not is_val_check_epoch: return False # val_check_batch is inf for iterable datasets with no length defined is_infinite_dataset = self.trainer.val_check_batch == float("inf") if is_last_batch and is_infinite_dataset: return True if self.trainer.should_stop: return True # TODO(@awaelchli): let training/eval loop handle logic around limit_*_batches and val_check_batch is_val_check_batch = is_last_batch if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset: is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0 elif self.trainer.val_check_batch != float("inf"): is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0 return is_val_check_batch def _save_loggers_on_train_batch_end(self) -> None: """Flushes loggers to disk.""" # when loggers should save to disk should_flush_logs = self.trainer.logger_connector.should_flush_logs if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None: self.trainer.logger.save() def _reload_dataloader_state_dict(self, data_fetcher: AbstractDataFetcher): if self._dataloader_state_dict: data_fetcher.dataloader.load_state_dict(self._dataloader_state_dict) self._dataloader_state_dict = None
class PredictionEpochLoop(Loop): """Loop performing prediction on arbitrary sequentially used dataloaders.""" def __init__(self) -> None: super().__init__() self.return_predictions: bool = False self.predictions: List[Any] = [] self.current_batch_indices: List[int] = [] self._dl_max_batches: Optional[int] = None self._num_dataloaders: Optional[int] = None self._warning_cache = WarningCache() self._all_batch_indices: List[int] = [] @property def done(self) -> bool: """Ends prediction when the iteration count exceeds the total number of available batches""" return self.iteration_count >= self._dl_max_batches @property def should_store_predictions(self) -> bool: """Whether the predictions should be stored for later usage (e.g. aggregation or returning)""" any_pred = any(cb.interval.on_epoch for cb in self.trainer.prediction_writer_callbacks) return self.return_predictions or any_pred def reset(self) -> None: """Resets the loops internal state""" self.iteration_count = 0 self._all_batch_indices: List[int] = [] self.predictions: List[Any] = [] def on_run_start(self, dataloader_iter: Iterator, dataloader_idx: int, dl_max_batches: int, num_dataloaders: int, return_predictions: bool = False) -> None: """ Prepares the loops internal state Args: dataloader_iter: the iterator over the current dataloader dataloader_idx: the index of the current dataloader dl_max_batches: the maximum number of batches the current loader can produce num_dataloaders: the total number of dataloaders return_predictions: whether to return the obtained predictions """ void(dataloader_iter, dataloader_idx) self._dl_max_batches = dl_max_batches self._num_dataloaders = num_dataloaders self.return_predictions = return_predictions def advance(self, dataloader_iter: Iterator, dataloader_idx: int, dl_max_batches: int, num_dataloaders: int, return_predictions: bool = False) -> None: """ Runs one prediction step. Args: dataloader_iter: the iterator over the current dataloader dataloader_idx: the index of the current dataloader dl_max_batches: the maximum number of batches the current loader can produce num_dataloaders: the total number of dataloaders return_predictions: whether to return the obtained predictions """ batch_idx, batch = next(dataloader_iter) if batch is None: raise StopIteration with self.trainer.profiler.profile("predict_step"): self._predict_step(batch, batch_idx, dataloader_idx) def on_run_end(self) -> Tuple[Any, Any]: """Returns the predictions and the corresponding batch indices""" return self.predictions, self._all_batch_indices def teardown(self) -> None: """Frees memory of collected predictions.""" self.predictions = [] self._all_batch_indices = [] def _predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None: """Runs the actual predict step together with all the necessary bookkeeping and the hooks tied to the predict step. 
Args: batch: the current batch to run the prediction on batch_idx: the index of the current batch dataloader_idx: the index of the dataloader producing the current batch """ # configure step_kwargs step_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx) # extract batch_indices and store them self._store_batch_indices(dataloader_idx) model_ref = self.trainer.lightning_module self.trainer.call_hook("on_predict_batch_start", batch, batch_idx, dataloader_idx) model_ref._current_fx_name = "predict_step" predictions = self.trainer.accelerator.predict_step(step_kwargs) if predictions is None: self._warning_cache.warn( "predict returned None. If it was on purpose, ignore this warning..." ) self.trainer.call_hook("on_predict_batch_end", predictions, batch, batch_idx, dataloader_idx) if self.should_store_predictions: self.predictions.append(predictions) def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: int) -> Dict[str, Any]: """ Assembles the keyword arguments for the ``predict_step`` Args: batch: the current batch to run the prediction on batch_idx: the index of the current batch dataloader_idx: the index of the dataloader producing the current batch Returns: the dictionary containing all the keyword arguments for the predict step """ step_kwargs = OrderedDict([('batch', batch), ('batch_idx', batch_idx)]) if self._num_dataloaders > 1: step_kwargs['dataloader_idx'] = dataloader_idx return step_kwargs def _store_batch_indices(self, dataloader_idx: int) -> None: """Stores the batch indices if the predictions should be stored""" batch_sampler = self.trainer.predict_dataloaders[ dataloader_idx].batch_sampler if isinstance(batch_sampler, IndexBatchSamplerWrapper): self.current_batch_indices = batch_sampler.batch_indices if self.should_store_predictions: self._all_batch_indices.append(batch_sampler.batch_indices)
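# --- Illustrative sketch (not part of the Lightning source) -----------------
# A standalone sketch of how `_build_kwargs` above assembles the arguments for
# `predict_step`: `dataloader_idx` is only injected when more than one
# prediction dataloader is in use, so single-dataloader `predict_step`
# signatures stay unchanged. Function and variable names are hypothetical.
from collections import OrderedDict
from typing import Any, Dict


def build_predict_kwargs(batch: Any, batch_idx: int, dataloader_idx: int,
                         num_dataloaders: int) -> Dict[str, Any]:
    step_kwargs: Dict[str, Any] = OrderedDict(batch=batch, batch_idx=batch_idx)
    if num_dataloaders > 1:
        # only multi-dataloader setups receive the extra argument
        step_kwargs["dataloader_idx"] = dataloader_idx
    return step_kwargs


if __name__ == "__main__":
    assert "dataloader_idx" not in build_predict_kwargs([1, 2], 0, 0, num_dataloaders=1)
    assert build_predict_kwargs([1, 2], 0, 3, num_dataloaders=2)["dataloader_idx"] == 3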
class EvaluationLoop(object): def __init__(self, trainer: 'pl.Trainer'): self.trainer: 'pl.Trainer' = trainer self.outputs: EPOCH_OUTPUT = [] self.predictions: Optional[PredictionCollection] = None self.max_batches: Optional[List[Union[int, float]]] = None self.warning_cache = WarningCache() self.num_dataloaders: Optional[int] = None def on_trainer_init(self) -> None: self.trainer.num_sanity_val_batches = [] self.trainer.num_test_batches = [] self.trainer.num_val_batches = [] self.trainer.test_dataloaders = None self.trainer.val_dataloaders = None # .validate() and .test() set this when they load a checkpoint self.trainer.validated_ckpt_path = None self.trainer.tested_ckpt_path = None # when true, print evaluation results in .validate() and .test() self.trainer.verbose_evaluate = True def get_evaluation_dataloaders( self ) -> Tuple[Optional[List[DataLoader]], List[Union[int, float]]]: model = self.trainer.lightning_module # select dataloaders if self.trainer.testing: self.trainer.reset_test_dataloader(model) dataloaders = self.trainer.test_dataloaders max_batches = self.trainer.num_test_batches else: # val if self.trainer.val_dataloaders is None or self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_val_dataloader(model) if self.trainer.sanity_checking: self.trainer.num_sanity_val_batches = [ min(self.trainer.num_sanity_val_steps, val_batches) for val_batches in self.trainer.num_val_batches ] max_batches = self.trainer.num_sanity_val_batches else: max_batches = self.trainer.num_val_batches dataloaders = self.trainer.val_dataloaders return dataloaders, max_batches def should_skip_evaluation(self, max_batches: List[Union[int, float]]) -> bool: return sum(max_batches) == 0 def on_evaluation_start(self, *args: Any, **kwargs: Any) -> None: if self.trainer.testing: self.trainer.call_hook('on_test_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_start', *args, **kwargs) def on_evaluation_model_eval(self) -> None: model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_eval() else: model_ref.on_validation_model_eval() def on_evaluation_model_train(self) -> None: model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_train() else: model_ref.on_validation_model_train() def on_evaluation_end(self, *args: Any, **kwargs: Any) -> None: if self.trainer.testing: self.trainer.call_hook('on_test_end', *args, **kwargs) else: self.trainer.call_hook('on_validation_end', *args, **kwargs) if self.trainer.state.fn != TrainerFn.FITTING: # summarize profile results self.trainer.profiler.describe() def reload_evaluation_dataloaders(self) -> None: model = self.trainer.lightning_module if self.trainer.testing: self.trainer.reset_test_dataloader(model) else: self.trainer.reset_val_dataloader(model) def setup(self, max_batches: List[Union[int, float]], dataloaders: List[DataLoader]) -> None: # bookkeeping self.outputs = [] self.predictions = PredictionCollection(self.trainer.global_rank, self.trainer.world_size) # convert max_batches to list if isinstance(max_batches, int): max_batches = [max_batches] * len(dataloaders) self.max_batches = max_batches self.num_dataloaders = self._get_num_dataloaders(dataloaders) def on_evaluation_epoch_start(self, *args: Any, **kwargs: Any) -> None: self.trainer.call_hook('on_epoch_start', *args, **kwargs) if self.trainer.testing: self.trainer.call_hook('on_test_epoch_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_epoch_start', *args, **kwargs) def 
_build_args(self, batch: Any, batch_idx: int, dataloader_idx: int) -> List[Union[Any, int]]: # make dataloader_idx arg in validation_step optional args = [batch, batch_idx] multiple_val_loaders = ( not self.trainer.testing and self._get_num_dataloaders(self.trainer.val_dataloaders) > 1) multiple_test_loaders = ( self.trainer.testing and self._get_num_dataloaders(self.trainer.test_dataloaders) > 1) if multiple_test_loaders or multiple_val_loaders: args.append(dataloader_idx) return args def _get_num_dataloaders(self, dataloaders: Optional[List[DataLoader]]) -> int: # case where user does: # return dl1, dl2 if dataloaders is not None: length = len(dataloaders) if len(dataloaders) > 0 and isinstance(dataloaders[0], (list, tuple)): length = len(dataloaders[0]) return length else: return 0 def evaluation_step(self, batch: Any, batch_idx: int, dataloader_idx: int) -> Optional[STEP_OUTPUT]: # configure args args = self._build_args(batch, batch_idx, dataloader_idx) model_ref = self.trainer.lightning_module model_ref._results = Result() if self.trainer.testing: model_ref._current_fx_name = "test_step" with self.trainer.profiler.profile("test_step"): output = self.trainer.accelerator.test_step(args) else: model_ref._current_fx_name = "validation_step" with self.trainer.profiler.profile("validation_step"): output = self.trainer.accelerator.validation_step(args) # capture any logged information self.trainer.logger_connector.cache_logged_metrics() # track batch size for weighted average if isinstance(output, Result): output.track_batch_size(batch) return output def evaluation_step_end(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]: if self.trainer.testing: output = self.trainer.call_hook('test_step_end', *args, **kwargs) else: output = self.trainer.call_hook('validation_step_end', *args, **kwargs) return output def evaluation_epoch_end(self, outputs: EPOCH_OUTPUT) -> None: # unset dataloder_idx in model self.trainer.logger_connector.evaluation_epoch_end() # call the model epoch end model = self.trainer.lightning_module if self.trainer.testing: if is_overridden('test_epoch_end', model=model): model._current_fx_name = 'test_epoch_end' model.test_epoch_end(outputs) else: if is_overridden('validation_epoch_end', model=model): model._current_fx_name = 'validation_epoch_end' model.validation_epoch_end(outputs) # capture logging self.trainer.logger_connector.cache_logged_metrics() def on_evaluation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None: # set dataloader_idx to model and track batch_size self.trainer.logger_connector.on_evaluation_batch_start( batch, dataloader_idx, self.num_dataloaders) if self.trainer.testing: self.trainer.call_hook('on_test_batch_start', batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_start', batch, batch_idx, dataloader_idx) def on_evaluation_batch_end( self, output: Optional[STEP_OUTPUT], batch: Any, batch_idx: int, dataloader_idx: int, ) -> None: if self.trainer.testing: self.trainer.call_hook('on_test_batch_end', output, batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_end', output, batch, batch_idx, dataloader_idx) # store predicitons if do_write_predictions and track eval loss history self.store_predictions(output, batch_idx, dataloader_idx) def store_predictions(self, output: Optional[STEP_OUTPUT], batch_idx: int, dataloader_idx: int) -> None: # Add step predictions to prediction collection to write later if output is not None and self.predictions is not None: if 
isinstance(output, Result) and self.trainer.testing: self.predictions.add(output.pop('predictions', None)) # track debug metrics self.trainer.dev_debugger.track_eval_loss_history( batch_idx, dataloader_idx, output) def on_evaluation_epoch_end( self, outputs: Union[List[List[Dict]], List[Dict]]) -> None: model_ref = self.trainer.lightning_module hook_name = "on_test_epoch_end" if self.trainer.testing else "on_validation_epoch_end" self.trainer._reset_result_and_set_hook_fx_name(hook_name) with self.trainer.profiler.profile(hook_name): if hasattr(self.trainer, hook_name): on_evaluation_epoch_end_hook = getattr(self.trainer, hook_name) on_evaluation_epoch_end_hook(outputs) if is_overridden(hook_name, model_ref): model_hook_fx = getattr(model_ref, hook_name) if is_param_in_hook_signature(model_hook_fx, "outputs"): model_hook_fx(outputs) else: self.warning_cache.warn( f"`ModelHooks.{hook_name}` signature has changed in v1.3. `outputs` parameter has been added." " Support for the old signature will be removed in v1.5", DeprecationWarning) model_hook_fx() self.trainer._cache_logged_metrics() self.trainer.call_hook('on_epoch_end') def log_evaluation_step_metrics(self, batch_idx: int) -> None: if self.trainer.sanity_checking: return cached_results = self.trainer.logger_connector.cached_results if cached_results is not None: cached_batch_pbar_metrics, cached_batch_log_metrics = cached_results.update_logger_connector( ) if len(cached_batch_log_metrics) > 0: # make the metrics appear as a different line in the same graph metrics_by_epoch = {} for k, v in cached_batch_log_metrics.items(): metrics_by_epoch[ f'{k}/epoch_{self.trainer.current_epoch}'] = v self.trainer.logger_connector.log_metrics(metrics_by_epoch, {}, step=batch_idx) if len(cached_batch_pbar_metrics) > 0: self.trainer.logger_connector.add_progress_bar_metrics( cached_batch_pbar_metrics)
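# --- Illustrative sketch (not part of the Lightning source) -----------------
# `on_evaluation_epoch_end` above calls the model hook with `outputs` only
# when the overridden hook actually declares that parameter, which is how the
# old and new `on_*_epoch_end` signatures coexist during the deprecation
# window. A standalone sketch of that dispatch using `inspect` directly;
# names are hypothetical.
import inspect


def call_epoch_end_hook(hook, outputs):
    params = inspect.signature(hook).parameters
    if "outputs" in params:
        return hook(outputs)
    return hook()


if __name__ == "__main__":
    def new_style(outputs):
        return len(outputs)

    def old_style():
        return "no outputs"

    assert call_epoch_end_hook(new_style, [1, 2, 3]) == 3
    assert call_epoch_end_hook(old_style, [1, 2, 3]) == "no outputs"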
class TrainingEpochLoop(loops.Loop[_OUTPUTS_TYPE]): """Runs over all batches in a dataloader (one epoch). Args: min_steps: The minimum number of steps (batches) to process max_steps: The maximum number of steps (batches) to process """ def __init__(self, min_steps: Optional[int] = None, max_steps: int = -1) -> None: super().__init__() if max_steps is None: rank_zero_deprecation( "Setting `max_steps = None` is deprecated in v1.5 and will no longer be supported in v1.7." " Use `max_steps = -1` instead.") max_steps = -1 elif max_steps < -1: raise MisconfigurationException( f"`max_steps` must be a non-negative integer or -1 (infinite steps). You passed in {max_steps}." ) self.min_steps = min_steps self.max_steps = max_steps self.batch_progress = BatchProgress() self.scheduler_progress = SchedulerProgress() self.batch_loop = TrainingBatchLoop() self.val_loop = loops.EvaluationLoop(verbose=False) self._results = _ResultCollection(training=True) self._outputs: _OUTPUTS_TYPE = [] self._warning_cache = WarningCache() # caches the loaded dataloader state until dataloader objects are available self._dataloader_state_dict: Dict[str, Any] = {} self._batches_that_stepped: int = 0 @property def total_batch_idx(self) -> int: """Returns the current batch index (across epochs)""" # use `ready` instead of `completed` in case this is accessed after `completed` has been increased # but before the next `ready` increase return self.batch_progress.total.ready - 1 @property def batch_idx(self) -> int: """Returns the current batch index (within this epoch)""" # use `ready` instead of `completed` in case this is accessed after `completed` has been increased # but before the next `ready` increase return self.batch_progress.current.ready - 1 @property def global_step(self) -> int: lightning_module = self.trainer.lightning_module if lightning_module is None or lightning_module.automatic_optimization: return self.batch_loop.optimizer_loop.optim_progress.optimizer_steps return self.batch_loop.manual_loop.optim_step_progress.total.completed @property def _is_training_done(self) -> bool: max_steps_reached = _is_max_limit_reached(self.global_step, self.max_steps) return max_steps_reached or self._num_ready_batches_reached() @property def _is_validation_done(self) -> bool: # when we are restarting we want to check whether the val loop has finished return not self.restarting or self.val_loop.done @property def done(self) -> bool: """Evaluates when to leave the loop.""" return (self._is_training_done and self._is_validation_done) or self.trainer.should_stop def connect( # type: ignore[override] self, batch_loop: Optional[TrainingBatchLoop] = None, val_loop: Optional["loops.EvaluationLoop"] = None, ) -> None: """Optionally connect a custom batch or validation loop to this training epoch loop.""" if batch_loop is not None: self.batch_loop = batch_loop if val_loop is not None: self.val_loop = val_loop def reset(self) -> None: """Resets the internal state of the loop for a new run.""" if self.restarting: self.batch_progress.reset_on_restart() self.scheduler_progress.reset_on_restart() self.batch_loop.optimizer_loop.optim_progress.reset_on_restart() trainer = self.trainer if not trainer.state._fault_tolerant_mode.is_enabled and trainer.num_training_batches != float( "inf"): expected_steps = math.ceil(trainer.num_training_batches / trainer.accumulate_grad_batches) if self.global_step % expected_steps != 0: rank_zero_warn( "You're resuming from a checkpoint that ended before the epoch ended. 
This can cause unreliable" " results if further training is done. Consider using an end-of-epoch checkpoint or enabling" " fault-tolerant training:" " https://pytorch-lightning.readthedocs.io/en/stable/advanced/fault_tolerant_training.html" ) else: self.batch_progress.reset_on_run() self.scheduler_progress.reset_on_run() self.batch_loop.optimizer_loop.optim_progress.reset_on_run() # when the epoch starts, the total val batch progress should be reset as it's supposed to count the batches # seen per epoch, this is useful for tracking when validation is run multiple times per epoch self.val_loop.epoch_loop.batch_progress.total.reset() self._outputs = [] def on_run_start( self, data_fetcher: AbstractDataFetcher ) -> None: # type: ignore[override] self._reload_dataloader_state_dict(data_fetcher) _ = iter(data_fetcher) # creates the iterator inside the fetcher # add the previous `fetched` value to properly track `is_last_batch` with no prefetching data_fetcher.fetched += self.batch_progress.current.ready def advance( self, data_fetcher: AbstractDataFetcher ) -> None: # type: ignore[override] """Runs a single training batch. Raises: StopIteration: When the epoch is canceled by the user returning -1 """ if self.restarting and self._should_check_val_fx( self.batch_idx, self.batch_progress.is_last_batch): # skip training and run validation in `on_advance_end` return # we are going to train first so the val loop does not need to restart self.val_loop.restarting = False if not isinstance(data_fetcher, DataLoaderIterDataFetcher): batch_idx = self.batch_idx + 1 batch = next(data_fetcher) else: batch_idx, batch = next(data_fetcher) self.batch_progress.is_last_batch = data_fetcher.done self.batch_progress.increment_ready() self.trainer._logger_connector.on_batch_start(batch, batch_idx) if batch is None: self._warning_cache.warn( "train_dataloader yielded None. If this was on purpose, ignore this warning..." 
) batch_output = [] else: # hook self.trainer._call_callback_hooks("on_batch_start") # TODO: Update this in v1.7 (deprecation: #9816) model_fx = self.trainer.lightning_module.on_train_batch_start extra_kwargs = ({ "dataloader_idx": 0 } if callable(model_fx) and is_param_in_hook_signature( model_fx, "dataloader_idx", explicit=True) else {}) # hook self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx, **extra_kwargs) response = self.trainer._call_lightning_module_hook( "on_train_batch_start", batch, batch_idx, **extra_kwargs) self.trainer._call_strategy_hook("on_train_batch_start", batch, batch_idx, **extra_kwargs) if response == -1: self.batch_progress.increment_processed() raise StopIteration self.batch_progress.increment_started() with self.trainer.profiler.profile("run_training_batch"): batch_output = self.batch_loop.run(batch, batch_idx) self.batch_progress.increment_processed() # update non-plateau LR schedulers # update epoch-interval ones only when we are at the end of training epoch self.update_lr_schedulers("step", update_plateau_schedulers=False) if self._num_ready_batches_reached(): self.update_lr_schedulers("epoch", update_plateau_schedulers=False) batch_end_outputs = self._prepare_outputs_training_batch_end( batch_output, lightning_module=self.trainer.lightning_module, num_optimizers=len(self.trainer.optimizers), ) # TODO: Update this in v1.7 (deprecation: #9816) model_fx = self.trainer.lightning_module.on_train_batch_end extra_kwargs = ({ "dataloader_idx": 0 } if callable(model_fx) and is_param_in_hook_signature( model_fx, "dataloader_idx", explicit=True) else {}) self.trainer._call_callback_hooks("on_train_batch_end", batch_end_outputs, batch, batch_idx, **extra_kwargs) self.trainer._call_lightning_module_hook("on_train_batch_end", batch_end_outputs, batch, batch_idx, **extra_kwargs) self.trainer._call_callback_hooks("on_batch_end") self.trainer._logger_connector.on_batch_end() self.batch_progress.increment_completed() if is_overridden("training_epoch_end", self.trainer.lightning_module): self._outputs.append(batch_output) # ----------------------------------------- # SAVE METRICS TO LOGGERS AND PROGRESS_BAR # ----------------------------------------- self.trainer._logger_connector.update_train_step_metrics() def on_advance_end(self) -> None: # ----------------------------------------- # VALIDATE IF NEEDED # ----------------------------------------- should_check_val = self._should_check_val_fx( self.batch_idx, self.batch_progress.is_last_batch) if should_check_val: self.trainer.validating = True self._run_validation() self.trainer.training = True # update plateau LR scheduler after metrics are logged self.update_lr_schedulers("step", update_plateau_schedulers=True) if not self._should_accumulate(): # this is increased once per batch disregarding multiple optimizers or tbptt on purpose for loggers self._batches_that_stepped += 1 # this will save based on the `batches_that_stepped` value self._save_loggers_on_train_batch_end() # if training finished, defer exit to the parent. this assumes there will be enough time in between # which might not be the case depending on what's in the `*_epoch_end` hooks if not self._is_training_done: # if fault tolerant is enabled and process has been notified, exit. 
self.trainer._exit_gracefully_on_signal() def on_run_end(self) -> _OUTPUTS_TYPE: outputs, self._outputs = self._outputs, [] return outputs def teardown(self) -> None: self._results.cpu() self.batch_loop.teardown() self.val_loop.teardown() def on_save_checkpoint(self) -> Dict: state_dict = super().on_save_checkpoint() if (self.trainer is not None and self.trainer.state._fault_tolerant_mode.is_enabled and self.trainer.train_dataloader is not None and not self._num_completed_batches_reached() # did not finish # TODO: fault-tolerance requires a minimum number of batches so probably should be > 0 and self.batch_progress.current.ready # did start ): loader: CombinedLoader = self.trainer.train_dataloader state = loader.state_dict(has_completed=self._has_completed()) if state: state_dict[ "dataloader_state_dict"] = _collect_states_on_rank_zero_over_collection( state) return state_dict def on_load_checkpoint(self, state_dict: Dict) -> None: # cache the dataloader state dict until the dataloader objects are available self._dataloader_state_dict = state_dict.get("dataloader_state_dict") def _run_validation(self) -> None: # reload dataloaders self.val_loop._reload_evaluation_dataloaders() with torch.no_grad(): self.val_loop.run() def _accumulated_batches_reached(self) -> bool: """Determine if accumulation will be finished by the end of the current batch.""" return self.batch_progress.current.ready % self.trainer.accumulate_grad_batches == 0 def _num_ready_batches_reached(self) -> bool: """Checks if we are in the last batch or if there are more batches to follow.""" epoch_finished_on_ready = self.batch_progress.current.ready == self.trainer.num_training_batches return epoch_finished_on_ready or self.batch_progress.is_last_batch def _num_completed_batches_reached(self) -> bool: epoch_finished_on_completed = self.batch_progress.current.completed == self.trainer.num_training_batches dataloader_consumed_successfully = self.batch_progress.is_last_batch and self._has_completed( ) return epoch_finished_on_completed or dataloader_consumed_successfully def _has_completed(self) -> bool: return self.batch_progress.current.ready == self.batch_progress.current.completed def _should_accumulate(self) -> bool: """Checks if the optimizer step should be performed or gradients should be accumulated for the current step.""" accumulation_done = self._accumulated_batches_reached() # Lightning steps on the final batch is_final_batch = self._num_ready_batches_reached() # but the strategy might not strategy_accumulates_on_final_batch = self.trainer.strategy.handles_gradient_accumulation or not is_final_batch return not accumulation_done and strategy_accumulates_on_final_batch @staticmethod def _prepare_outputs_training_batch_end( batch_output: _BATCH_OUTPUTS_TYPE, lightning_module: "pl.LightningModule", num_optimizers: int, ) -> Union[List[List[Dict[str, Any]]], List[Dict[str, Any]]]: """Processes the outputs from the batch loop into the format passed to the ``on_train_batch_end`` hook.""" if not batch_output: return [] # convert optimizer dicts to list if lightning_module.automatic_optimization: batch_output = apply_to_collection(batch_output, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers) array = np.array(batch_output, dtype=object) # TODO: remove in v1.8 if (num_optimizers > 1 and lightning_module.truncated_bptt_steps > 0 and not _v1_8_output_format(lightning_module.on_train_batch_end)): rank_zero_deprecation( "You are training with multiple optimizers AND truncated backpropagation through time 
enabled." " The current format of the `on_train_batch_end(outputs, ...)` is a 2d list with sizes" " (n_optimizers, tbptt_steps), however, this has been deprecated and will change in version v1.8 to" " (tbptt_steps, n_optimizers). You can update your code by adding the following parameter to your" " hook signature: `on_train_batch_end(outputs, ..., new_format=True)`." ) # (tbptt_steps, n_opt) -> (n_opt, tbptt_steps) if array.ndim == 1: array = np.expand_dims(array, 1) array = array.transpose((1, 0)) # squeeze all single-element dimensions array = array.squeeze() array = array.tolist() array = _recursive_unpad(array) return array @staticmethod def _prepare_outputs_training_epoch_end( batch_outputs: _OUTPUTS_TYPE, lightning_module: "pl.LightningModule", num_optimizers: int, ) -> Union[List[List[List[Dict[str, Any]]]], List[List[Dict[str, Any]]], List[Dict[str, Any]]]: """Processes the outputs from the batch loop into the format passed to the ``training_epoch_end`` hook.""" # `batch_outputs` (plural) is the same as `epoch_end_output` (singular) if not batch_outputs: return [] # convert optimizer dicts to list if lightning_module.automatic_optimization: batch_outputs = apply_to_collection(batch_outputs, dtype=dict, function=_convert_optim_dict, num_optimizers=num_optimizers) array = _recursive_pad(batch_outputs) # TODO: remove in v1.8 if (num_optimizers > 1 and lightning_module.truncated_bptt_steps > 0 and not _v1_8_output_format(lightning_module.on_train_epoch_end)): rank_zero_deprecation( "You are training with multiple optimizers AND truncated backpropagation through time enabled." " The current format of the `training_epoch_end(outputs)` is a 3d list with sizes" " (n_optimizers, n_batches, tbptt_steps), however, this has been deprecated and will change in version" " v1.8 to (n_batches, tbptt_steps, n_optimizers). You can update your code by adding the following" " parameter to your hook signature: `training_epoch_end(outputs, new_format=True)`." ) # (n_batches, tbptt_steps, n_opt) -> (n_opt, n_batches, tbptt_steps) if array.ndim == 2: array = np.expand_dims(array, 2) array = array.transpose((2, 0, 1)) # squeeze all single-element dimensions array = array.squeeze() array = array.tolist() array = _recursive_unpad(array) # in case we squeezed from 1-element array to a 0-dim array array = array if isinstance(array, list) else [array] # remove residual empty lists array = [ item for item in array if not isinstance(item, list) or len(item) ] return array def update_lr_schedulers(self, interval: str, update_plateau_schedulers: bool) -> None: """updates the lr schedulers based on the given interval.""" if interval == "step" and self._should_accumulate(): return active_optimizers = _get_active_optimizers( self.trainer.optimizers, self.trainer.optimizer_frequencies, self.total_batch_idx) self._update_learning_rates( interval=interval, update_plateau_schedulers=update_plateau_schedulers, opt_indices=[opt_idx for opt_idx, _ in active_optimizers], ) def _update_learning_rates( self, interval: str, update_plateau_schedulers: bool, opt_indices: Optional[List[int]] = None) -> None: """Update learning rates. Args: interval: either 'epoch' or 'step'. update_plateau_schedulers: control whether ``ReduceLROnPlateau`` or non-plateau schedulers get updated. This is used so non-plateau schedulers can be updated before running validation. Checkpoints are commonly saved during validation, however, on-plateau schedulers might monitor a validation metric so they have to be updated separately. 
opt_indices: indices of the optimizers to update. """ if not self.trainer.lr_scheduler_configs or not self.trainer.lightning_module.automatic_optimization: return if opt_indices is None: opt_indices = [] for config in self.trainer.lr_scheduler_configs: if config.opt_idx not in opt_indices: continue if update_plateau_schedulers ^ config.reduce_on_plateau: continue current_idx = self.batch_idx if interval == "step" else self.trainer.current_epoch current_idx += 1 # account for both batch and epoch starts from 0 # Take step if call to update_learning_rates matches the interval key and # the current step modulo the schedulers frequency is zero if config.interval == interval and current_idx % config.frequency == 0: monitor_val = None if config.reduce_on_plateau: # If instance of ReduceLROnPlateau, we need a monitor monitor_key = config.monitor monitor_val = self._get_monitor_value(monitor_key) if monitor_val is None: if config.strict: avail_metrics = list(self.trainer.callback_metrics) raise MisconfigurationException( f"ReduceLROnPlateau conditioned on metric {monitor_key}" f" which is not available. Available metrics are: {avail_metrics}." " Condition can be set using `monitor` key in lr scheduler dict" ) rank_zero_warn( f"ReduceLROnPlateau conditioned on metric {monitor_key}" " which is not available but strict is set to `False`." " Skipping learning rate update.", category=RuntimeWarning, ) continue self.scheduler_progress.increment_ready() # update LR self.trainer._call_lightning_module_hook( "lr_scheduler_step", config.scheduler, config.opt_idx, monitor_val, ) self.scheduler_progress.increment_completed() def _get_monitor_value(self, key: str) -> Any: # this is a separate method to aid in testing return self.trainer.callback_metrics.get(key) def _should_check_val_epoch(self): return (self.trainer.enable_validation and (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0) def _should_check_val_fx(self, batch_idx: int, is_last_batch: bool) -> bool: """Decide if we should run validation.""" if not self._should_check_val_epoch(): return False # val_check_batch is inf for iterable datasets with no length defined is_infinite_dataset = self.trainer.val_check_batch == float("inf") if is_last_batch and is_infinite_dataset: return True if self.trainer.should_stop: return True # TODO(@awaelchli): let training/eval loop handle logic around limit_*_batches and val_check_batch is_val_check_batch = is_last_batch if isinstance(self.trainer.limit_train_batches, int) and is_infinite_dataset: is_val_check_batch = (batch_idx + 1) % self.trainer.limit_train_batches == 0 elif self.trainer.val_check_batch != float("inf"): is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0 return is_val_check_batch def _save_loggers_on_train_batch_end(self) -> None: """Flushes loggers to disk.""" # this assumes that `batches_that_stepped` was increased before should_flush = self._batches_that_stepped % self.trainer.flush_logs_every_n_steps == 0 if should_flush or self.trainer.should_stop: for logger in self.trainer.loggers: logger.save() def _reload_dataloader_state_dict( self, data_fetcher: AbstractDataFetcher) -> None: if self._dataloader_state_dict: data_fetcher.dataloader.load_state_dict( self._dataloader_state_dict) self._dataloader_state_dict = None
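# --- Illustrative sketch (not part of the Lightning source) -----------------
# A simplified, standalone rendition of the `_should_check_val_fx` decision
# above: validation runs on the last batch of an infinite dataset, when the
# trainer wants to stop, or every `val_check_batch` batches otherwise. The
# epoch-level gating (`check_val_every_n_epoch`) and the `limit_train_batches`
# special case are omitted here; names are hypothetical.
def should_run_validation(batch_idx: int, is_last_batch: bool,
                          val_check_batch: float, should_stop: bool) -> bool:
    is_infinite_dataset = val_check_batch == float("inf")
    if is_last_batch and is_infinite_dataset:
        return True
    if should_stop:
        return True
    if not is_infinite_dataset:
        # +1 because `batch_idx` is 0-based
        return (batch_idx + 1) % val_check_batch == 0
    return is_last_batch


if __name__ == "__main__":
    assert should_run_validation(49, False, val_check_batch=50, should_stop=False)
    assert not should_run_validation(48, False, val_check_batch=50, should_stop=False)
    assert should_run_validation(7, True, val_check_batch=float("inf"), should_stop=False)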
class PredictionEpochLoop(Loop): """Loop performing prediction on arbitrary sequentially used dataloaders.""" def __init__(self) -> None: super().__init__() self.return_predictions: bool = False self.predictions: List[Any] = [] self.current_batch_indices: List[int] = [] self.batch_progress = Progress() self._dl_max_batches: Optional[int] = None self._num_dataloaders: Optional[int] = None self._warning_cache = WarningCache() self._all_batch_indices: List[int] = [] @property def done(self) -> bool: """Ends prediction when the iteration count exceeds the total number of available batches.""" return self.batch_progress.current.completed >= self._dl_max_batches @property def should_store_predictions(self) -> bool: """Whether the predictions should be stored for later usage (e.g. aggregation or returning)""" any_pred = any(cb.interval.on_epoch for cb in self.trainer.prediction_writer_callbacks) return self.return_predictions or any_pred def connect(self, **kwargs: "Loop") -> None: raise NotImplementedError( f"{self.__class__.__name__} does not connect any child loops.") def reset(self) -> None: """Resets the loops internal state.""" self._all_batch_indices: List[int] = [] self.predictions: List[Any] = [] self.batch_progress.reset_on_run() def on_run_start( self, dataloader_iter: Iterator, dataloader_idx: int, dl_max_batches: int, num_dataloaders: int, return_predictions: bool = False, ) -> None: """Prepares the loops internal state. Args: dataloader_iter: the iterator over the current dataloader dataloader_idx: the index of the current dataloader dl_max_batches: the maximum number of batches the current loader can produce num_dataloaders: the total number of dataloaders return_predictions: whether to return the obtained predictions """ void(dataloader_iter, dataloader_idx) self._dl_max_batches = dl_max_batches self._num_dataloaders = num_dataloaders self.return_predictions = return_predictions def advance( self, dataloader_iter: Iterator, dataloader_idx: int, dl_max_batches: int, num_dataloaders: int, return_predictions: bool = False, ) -> None: """Runs one prediction step. Args: dataloader_iter: the iterator over the current dataloader dataloader_idx: the index of the current dataloader dl_max_batches: the maximum number of batches the current loader can produce num_dataloaders: the total number of dataloaders return_predictions: whether to return the obtained predictions """ batch_idx, batch = next(dataloader_iter) if batch is None: raise StopIteration with self.trainer.profiler.profile("predict_batch_to_device"): batch = self.trainer.accelerator.batch_to_device( batch, dataloader_idx=dataloader_idx) self.batch_progress.increment_ready() with self.trainer.profiler.profile("predict_step"): self._predict_step(batch, batch_idx, dataloader_idx) def on_run_end(self) -> Tuple[List[Any], List[int]]: """Returns the predictions and the corresponding batch indices.""" predictions = self.predictions all_batch_indices = self._all_batch_indices # free memory self.predictions = [] self._all_batch_indices = [] return predictions, all_batch_indices def _predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None: """Runs the actual predict step together with all the necessary bookkeeping and the hooks tied to the predict step. 
Args: batch: the current batch to run the prediction on batch_idx: the index of the current batch dataloader_idx: the index of the dataloader producing the current batch """ # configure step_kwargs step_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx) # extract batch_indices and store them self._store_batch_indices(dataloader_idx) model_ref = self.trainer.lightning_module self.trainer.call_hook("on_predict_batch_start", batch, batch_idx, dataloader_idx) self.batch_progress.increment_started() model_ref._current_fx_name = "predict_step" predictions = self.trainer.accelerator.predict_step(step_kwargs) self.batch_progress.increment_processed() if predictions is None: self._warning_cache.warn( "predict returned None. If it was on purpose, ignore this warning..." ) self.trainer.call_hook("on_predict_batch_end", predictions, batch, batch_idx, dataloader_idx) self.batch_progress.increment_completed() if self.should_store_predictions: self.predictions.append( move_data_to_device(predictions, torch.device("cpu"))) def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: int) -> Dict[str, Any]: """Assembles the keyword arguments for the ``predict_step`` Args: batch: the current batch to run the prediction on batch_idx: the index of the current batch dataloader_idx: the index of the dataloader producing the current batch Returns: the dictionary containing all the keyword arguments for the predict step """ step_kwargs = OrderedDict([("batch", batch), ("batch_idx", batch_idx)]) if self._num_dataloaders > 1: step_kwargs["dataloader_idx"] = dataloader_idx return step_kwargs def _store_batch_indices(self, dataloader_idx: int) -> None: """Stores the batch indices if the predictions should be stored.""" batch_sampler = self.trainer.predict_dataloaders[ dataloader_idx].batch_sampler if isinstance(batch_sampler, IndexBatchSamplerWrapper): self.current_batch_indices = batch_sampler.batch_indices if self.should_store_predictions: self._all_batch_indices.append(batch_sampler.batch_indices) else: warning_cache.warn( "Lightning couldn't infer the indices fetched for your dataloader." )
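# --- Illustrative sketch (not part of the Lightning source) -----------------
# `_predict_step` above appends predictions only after moving them to CPU,
# which keeps the accumulated outputs from pinning GPU memory for the whole
# prediction epoch. A minimal standalone equivalent for plain tensors or
# simple containers of tensors; the helper name is hypothetical.
import torch


def detach_to_cpu(obj):
    # recursively detach tensors and move them to CPU
    if isinstance(obj, torch.Tensor):
        return obj.detach().cpu()
    if isinstance(obj, dict):
        return {k: detach_to_cpu(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(detach_to_cpu(v) for v in obj)
    return obj


if __name__ == "__main__":
    preds = {"logits": torch.randn(4, 3), "extras": [torch.ones(2)]}
    stored = detach_to_cpu(preds)
    assert stored["logits"].device.type == "cpu"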
class EvaluationLoop(object): def __init__(self, trainer): self.trainer = trainer self.outputs = [] self.step_metrics = [] self.predictions = None self.max_batches = None self.warning_cache = WarningCache() self.num_dataloaders = None def on_trainer_init(self): self.trainer.num_sanity_val_batches = [] self.trainer.num_test_batches = [] self.trainer.num_val_batches = [] self.trainer.test_dataloaders = None self.trainer.val_dataloaders = None # .validate() and .test() set this when they load a checkpoint self.trainer.validated_ckpt_path = None self.trainer.tested_ckpt_path = None # when true, print evaluation results in .validate() and .test() self.trainer.verbose_evaluate = True def get_evaluation_dataloaders(self): model = self.trainer.lightning_module # select dataloaders if self.trainer.testing: self.trainer.reset_test_dataloader(model) dataloaders = self.trainer.test_dataloaders max_batches = self.trainer.num_test_batches else: # val if self.trainer.val_dataloaders is None or self.trainer.reload_dataloaders_every_epoch: self.trainer.reset_val_dataloader(model) if self.trainer.sanity_checking: self.trainer.num_sanity_val_batches = [ min(self.trainer.num_sanity_val_steps, val_batches) for val_batches in self.trainer.num_val_batches ] max_batches = self.trainer.num_sanity_val_batches else: max_batches = self.trainer.num_val_batches dataloaders = self.trainer.val_dataloaders return dataloaders, max_batches def should_skip_evaluation(self, max_batches): return sum(max_batches) == 0 def on_evaluation_start(self, *args, **kwargs): if self.trainer.testing: self.trainer.call_hook('on_test_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_start', *args, **kwargs) def on_evaluation_model_eval(self, *_, **__): model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_eval() else: model_ref.on_validation_model_eval() def on_evaluation_model_train(self, *_, **__): model_ref = self.trainer.lightning_module if self.trainer.testing: model_ref.on_test_model_train() else: model_ref.on_validation_model_train() def on_evaluation_end(self, *args, **kwargs): if self.trainer.testing: self.trainer.call_hook('on_test_end', *args, **kwargs) else: self.trainer.call_hook('on_validation_end', *args, **kwargs) if self.trainer.state != TrainerState.FITTING: # summarize profile results self.trainer.profiler.describe() def reload_evaluation_dataloaders(self): model = self.trainer.lightning_module if self.trainer.testing: self.trainer.reset_test_dataloader(model) else: self.trainer.reset_val_dataloader(model) def setup(self, model, max_batches, dataloaders): # bookkeeping self.outputs = [] self.predictions = PredictionCollection(self.trainer.global_rank, self.trainer.world_size) # convert max_batches to list if isinstance(max_batches, int): max_batches = [max_batches] * len(dataloaders) self.max_batches = max_batches self.num_dataloaders = self._get_num_dataloaders(dataloaders) self._predictions = [[] for _ in range(self.num_dataloaders)] def on_evaluation_epoch_start(self, *args, **kwargs): self.trainer.call_hook('on_epoch_start', *args, **kwargs) if self.trainer.testing: self.trainer.call_hook('on_test_epoch_start', *args, **kwargs) else: self.trainer.call_hook('on_validation_epoch_start', *args, **kwargs) def _build_args(self, batch, batch_idx, dataloader_idx): # make dataloader_idx arg in validation_step optional args = [batch, batch_idx] multiple_val_loaders = ( not self.trainer.testing and self._get_num_dataloaders(self.trainer.val_dataloaders) > 1) 
multiple_test_loaders = ( self.trainer.testing and self._get_num_dataloaders(self.trainer.test_dataloaders) > 1) if multiple_test_loaders or multiple_val_loaders: args.append(dataloader_idx) return args def _get_num_dataloaders(self, dataloaders): # case where user does: # return dl1, dl2 length = len(dataloaders) if len(dataloaders) > 0 and isinstance(dataloaders[0], (list, tuple)): length = len(dataloaders[0]) return length def evaluation_step(self, batch, batch_idx, dataloader_idx): # configure args args = self._build_args(batch, batch_idx, dataloader_idx) model_ref = self.trainer.lightning_module model_ref._results = Result() if self.trainer.testing: model_ref._current_fx_name = "test_step" with self.trainer.profiler.profile("test_step"): output = self.trainer.accelerator.test_step(args) else: model_ref._current_fx_name = "validation_step" with self.trainer.profiler.profile("validation_step"): output = self.trainer.accelerator.validation_step(args) # capture any logged information self.trainer.logger_connector.cache_logged_metrics() # track batch size for weighted average is_result_obj = isinstance(output, Result) if is_result_obj: output.track_batch_size(batch) return output def evaluation_step_end(self, *args, **kwargs): if self.trainer.testing: output = self.trainer.call_hook('test_step_end', *args, **kwargs) else: output = self.trainer.call_hook('validation_step_end', *args, **kwargs) return output def evaluation_epoch_end(self, outputs): # unset dataloder_idx in model self.trainer.logger_connector.evaluation_epoch_end() # call the model epoch end model = self.trainer.lightning_module if self.trainer.testing: if is_overridden('test_epoch_end', model=model): model._current_fx_name = 'test_epoch_end' model.test_epoch_end(outputs) else: if is_overridden('validation_epoch_end', model=model): model._current_fx_name = 'validation_epoch_end' model.validation_epoch_end(outputs) # capture logging self.trainer.logger_connector.cache_logged_metrics() def __gather_epoch_end_eval_results(self, outputs): eval_results = [] for epoch_output in outputs: result = epoch_output[0].__class__.gather(epoch_output) eval_results.append(result) # with 1 dataloader don't pass in a list if len(eval_results) == 1: eval_results = eval_results[0] return eval_results def __auto_reduce_result_objs(self, outputs): # outputs has a list of results per dataloader eval_results = [] for dl_output in outputs: result = dl_output[0] result = result.__class__.reduce_on_epoch_end(dl_output) eval_results.append(result) return eval_results def on_predict_epoch_end(self): self.trainer._progress_bar_callback.on_test_end( self.trainer, self.trainer.lightning_module) results = self._predictions def _convert_to_numpy(v): return v.cpu().numpy() results = apply_to_collection(results, torch.Tensor, _convert_to_numpy) return results, None def on_evaluation_batch_start(self, batch, batch_idx, dataloader_idx): # set dataloader_idx to model and track batch_size self.trainer.logger_connector.on_evaluation_batch_start( batch, dataloader_idx, self.num_dataloaders) if self.trainer.testing: self.trainer.call_hook('on_test_batch_start', batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_start', batch, batch_idx, dataloader_idx) def on_evaluation_batch_end(self, output, batch, batch_idx, dataloader_idx): if self.trainer.testing: self.trainer.call_hook('on_test_batch_end', output, batch, batch_idx, dataloader_idx) else: self.trainer.call_hook('on_validation_batch_end', output, batch, batch_idx, dataloader_idx) # 
store predicitons if do_write_predictions and track eval loss history self.store_predictions(output, batch_idx, dataloader_idx) def store_predictions(self, output, batch_idx, dataloader_idx): # Add step predictions to prediction collection to write later if output is not None: do_write_predictions = isinstance(output, Result) and self.trainer.testing if do_write_predictions: self.predictions.add(output.pop('predictions', None)) # track debug metrics self.trainer.dev_debugger.track_eval_loss_history( batch_idx, dataloader_idx, output) def on_evaluation_epoch_end( self, outputs: Union[List[List[Dict]], List[Dict]]) -> None: model_ref = self.trainer.lightning_module hook_name = "on_test_epoch_end" if self.trainer.testing else "on_validation_epoch_end" self.trainer._reset_result_and_set_hook_fx_name(hook_name) with self.trainer.profiler.profile(hook_name): if hasattr(self.trainer, hook_name): on_evaluation_epoch_end_hook = getattr(self.trainer, hook_name) on_evaluation_epoch_end_hook(outputs) if is_overridden(hook_name, model_ref): model_hook_fx = getattr(model_ref, hook_name) if is_param_in_hook_signature(model_hook_fx, "outputs"): model_hook_fx(outputs) else: self.warning_cache.warn( f"`ModelHooks.{hook_name}` signature has changed in v1.3. `outputs` parameter has been added." " Support for the old signature will be removed in v1.5", DeprecationWarning) model_hook_fx() self.trainer._cache_logged_metrics() self.trainer.call_hook('on_epoch_end') def log_evaluation_step_metrics(self, output, batch_idx): if self.trainer.sanity_checking: return step_log_metrics = {} step_pbar_metrics = {} self.__log_result_step_metrics(step_log_metrics, step_pbar_metrics, batch_idx) def __log_result_step_metrics(self, step_log_metrics, step_pbar_metrics, batch_idx): cached_results = self.trainer.logger_connector.cached_results cached_batch_pbar_metrics, cached_batch_log_metrics = cached_results.update_logger_connector( ) step_log_metrics.update(cached_batch_log_metrics) step_pbar_metrics.update(cached_batch_pbar_metrics) if len(step_log_metrics) > 0: # make the metrics appear as a different line in the same graph metrics_by_epoch = {} for k, v in step_log_metrics.items(): metrics_by_epoch[f'{k}/epoch_{self.trainer.current_epoch}'] = v self.trainer.logger_connector.log_metrics(metrics_by_epoch, {}, step=batch_idx) if len(step_pbar_metrics) > 0: self.trainer.logger_connector.add_progress_bar_metrics( step_pbar_metrics)
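# --- Illustrative sketch (not part of the Lightning source) -----------------
# `__log_result_step_metrics` above suffixes every step-level metric key with
# the current epoch (e.g. "val_loss/epoch_3") so that each epoch's per-batch
# curve shows up as its own line in the logger. A standalone version of that
# renaming; names are hypothetical.
from typing import Dict


def metrics_per_epoch(step_metrics: Dict[str, float], current_epoch: int) -> Dict[str, float]:
    return {f"{k}/epoch_{current_epoch}": v for k, v in step_metrics.items()}


if __name__ == "__main__":
    assert metrics_per_epoch({"val_loss": 0.25}, current_epoch=3) == {"val_loss/epoch_3": 0.25}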