def validation_epoch_end(self, outputs):
    if self.logger is not None and self.logger.experiment is not None:
        logger = self.logger.experiment
        # When several loggers are attached, pull the TensorBoard experiment out of the collection.
        if isinstance(self.logger, LoggerCollection):
            for logger in self.logger:
                if isinstance(logger, TensorBoardLogger):
                    logger = logger.experiment
                    break
        # Dispatch to the logging helper that matches the logger type.
        if isinstance(logger, TensorBoardLogger):
            tacotron2_log_to_tb_func(
                logger, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
            )
        elif isinstance(logger, WandbLogger):
            tacotron2_log_to_wandb_func(
                logger, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
            )
    avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()  # This reduces across batches, not workers!
    self.log('val_loss', avg_loss)
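The inline comment above flags a real limitation: torch.stack([...]).mean() averages only the batches seen by the current process, so under DDP each worker logs its own local mean. A minimal sketch of one way to also reduce across workers, assuming a standard PyTorch Lightning LightningModule and using the sync_dist flag of self.log (the image-logging part is omitted here):

def validation_epoch_end(self, outputs):
    # Per-worker mean over validation batches, as before.
    avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
    # sync_dist=True asks Lightning to all-reduce (average) the logged value
    # across DDP processes, so the metric reflects every worker, not just this one.
    self.log('val_loss', avg_loss, sync_dist=True)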
def validation_epoch_end(self, outputs):
    if self.logger is not None and self.logger.experiment is not None:
        tacotron2_log_to_tb_func(
            self.logger.experiment, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
        )
    avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
    self.log('val_loss', avg_loss)
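For context, outputs here is the list of dictionaries returned by validation_step, one per validation batch: outputs[0].values() hands the first batch's tensors to the TensorBoard helper, and the list comprehension collects every batch's 'val_loss'. A rough sketch of that contract; the helper name below is hypothetical, and the real validation_step presumably also returns the spectrogram, gate, and alignment tensors that tacotron2_log_to_tb_func visualizes:

def validation_step(self, batch, batch_idx):
    # Hypothetical loss computation; extra keys in the returned dict would
    # also reach validation_epoch_end via `outputs`.
    loss = self._compute_validation_loss(batch)  # hypothetical helper
    return {'val_loss': loss}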