Example #1
    def _track_callback_metrics(self, eval_results):
        if len(eval_results) > 0 and (eval_results[0] is None or not isinstance(eval_results[0], Result)):
            return

        flat = {}
        if isinstance(eval_results, list):
            for eval_result in eval_results:
                # with a scalar return, auto set it to "val_loss" for callbacks
                if isinstance(eval_result, torch.Tensor):
                    flat = {'val_loss': eval_result}
                elif isinstance(eval_result, dict):
                    flat = flatten_dict(eval_result)

                # map the 'val_loss' magic word onto the checkpoint + early-stopping callbacks
                if 'val_loss' in flat:
                    flat['checkpoint_on'] = flat['val_loss']
                    flat['early_stop_on'] = flat['val_loss']
                self.trainer.logger_connector.callback_metrics.update(flat)
                if self.trainer.testing:
                    self.trainer.logger_connector.evaluation_callback_metrics.update(flat)
        else:
            # with a scalar return, auto set it to "val_loss" for callbacks
            if isinstance(eval_results, torch.Tensor):
                flat = {'val_loss': eval_results}
            else:
                flat = flatten_dict(eval_results)

            # map the 'val_loss' magic word onto the checkpoint + early-stopping callbacks
            if 'val_loss' in flat:
                flat['checkpoint_on'] = flat['val_loss']
                flat['early_stop_on'] = flat['val_loss']

            self.trainer.logger_connector.callback_metrics.update(flat)
            if self.trainer.testing:
                self.trainer.logger_connector.evaluation_callback_metrics.update(flat)
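
Both branches above normalize the return value into a flat dict before pushing it to the logger connector. A minimal sketch of a flatten_dict helper with that behavior (a hypothetical stand-in; the actual Lightning utility may differ in detail), followed by the 'val_loss' mapping applied to a scalar return:

    import torch

    def flatten_dict(source, delimiter='/'):
        # Hypothetical stand-in for the helper used above: collapse a
        # nested dict of metrics into a single level with joined keys.
        flat = {}
        for key, value in source.items():
            if isinstance(value, dict):
                for sub_key, sub_value in flatten_dict(value, delimiter).items():
                    flat[f'{key}{delimiter}{sub_key}'] = sub_value
            else:
                flat[key] = value
        return flat

    # A scalar return is wrapped under the 'val_loss' magic key, which is
    # then mirrored so the checkpoint and early-stopping callbacks see it:
    flat = {'val_loss': torch.tensor(0.25)}
    flat['checkpoint_on'] = flat['val_loss']
    flat['early_stop_on'] = flat['val_loss']
    print(sorted(flat))  # ['checkpoint_on', 'early_stop_on', 'val_loss']
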
Example #2
    def _track_callback_metrics(self, eval_results):
        if len(eval_results) > 0 and (eval_results[0] is None or
                                      not isinstance(eval_results[0], Result)):
            return

        flat = {}
        if isinstance(eval_results, list):
            for eval_result in eval_results:
                # with a scalar return, auto set it to "val_loss" for callbacks
                if isinstance(eval_result, torch.Tensor):
                    flat = {'val_loss': eval_result}
                elif isinstance(eval_result, dict):
                    flat = flatten_dict(eval_result)

                self.trainer.logger_connector.callback_metrics.update(flat)
                if self.trainer.state in (TrainerState.TESTING,
                                          TrainerState.VALIDATING):
                    self.trainer.logger_connector.evaluation_callback_metrics.update(
                        flat)
        else:
            # with a scalar return, auto set it to "val_loss" for callbacks
            if isinstance(eval_results, torch.Tensor):
                flat = {'val_loss': eval_results}
            else:
                flat = flatten_dict(eval_results)

            self.trainer.logger_connector.callback_metrics.update(flat)
            if self.trainer.state in (TrainerState.TESTING,
                                      TrainerState.VALIDATING):
                self.trainer.logger_connector.evaluation_callback_metrics.update(
                    flat)
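
The main change from Example #1 is the gate on evaluation metrics: instead of a boolean trainer.testing flag, this version checks the trainer's state enum for both the testing and validating phases. A rough sketch of that pattern with a minimal Enum stand-in (the real TrainerState has more members):

    from enum import Enum

    class TrainerState(Enum):
        # minimal stand-in for illustration only
        FITTING = 'FITTING'
        VALIDATING = 'VALIDATING'
        TESTING = 'TESTING'

    for state in TrainerState:
        # evaluation_callback_metrics is only mirrored while validating or testing
        mirror = state in (TrainerState.TESTING, TrainerState.VALIDATING)
        print(state.name, '-> mirror metrics:', mirror)
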
Example #3
    def _log_on_evaluation_epoch_end_metrics(self, eval_results,
                                             using_eval_result):
        if len(eval_results) > 0 and eval_results[0] is None:
            return

        if using_eval_result:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    self.trainer.logger_connector.callback_metrics.update(
                        eval_result.callback_metrics)
            else:
                self.trainer.logger_connector.callback_metrics.update(
                    eval_results.callback_metrics)
        else:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    # with a scalar return, auto set it to "val_loss" for callbacks
                    if isinstance(eval_result, torch.Tensor):
                        flat = {'val_loss': eval_result}
                    else:
                        flat = flatten_dict(eval_result)

                    # map the 'val_loss' magic word onto the checkpoint + early-stopping callbacks
                    if 'val_loss' in flat:
                        flat['checkpoint_on'] = flat['val_loss']
                        flat['early_stop_on'] = flat['val_loss']
                    self.trainer.logger_connector.callback_metrics.update(flat)
            else:
                # with a scalar return, auto set it to "val_loss" for callbacks
                if isinstance(eval_results, torch.Tensor):
                    flat = {'val_loss': eval_results}
                else:
                    flat = flatten_dict(eval_results)

                # map the 'val_loss' magic word onto the checkpoint + early-stopping callbacks
                if 'val_loss' in flat:
                    flat['checkpoint_on'] = flat['val_loss']
                    flat['early_stop_on'] = flat['val_loss']
                self.trainer.logger_connector.callback_metrics.update(flat)
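
When using_eval_result is true, each result already carries a prepared callback_metrics dict and is merged as-is. A toy illustration with SimpleNamespace standing in for Result objects (hypothetical scaffolding, not part of the snippet above):

    from types import SimpleNamespace

    eval_results = [
        SimpleNamespace(callback_metrics={'val_loss': 0.31}),
        SimpleNamespace(callback_metrics={'val_acc': 0.88}),
    ]

    callback_metrics = {}
    for eval_result in eval_results:
        # update() merges the metrics of every result into one dict
        callback_metrics.update(eval_result.callback_metrics)
    print(callback_metrics)  # {'val_loss': 0.31, 'val_acc': 0.88}
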
Example #4
    def __update_callback_metrics(self, eval_results, using_eval_result):
        if using_eval_result:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    self.callback_metrics = eval_result.callback_metrics
            else:
                self.callback_metrics = eval_results.callback_metrics
        else:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    # with a scalar return, auto set it to "val_loss" for callbacks
                    if isinstance(eval_result, torch.Tensor):
                        flat = {'val_loss': eval_result}
                    else:
                        flat = flatten_dict(eval_result)
                    self.callback_metrics.update(flat)
            else:
                # with a scalar return, auto set it to "val_loss" for callbacks
                if isinstance(eval_results, torch.Tensor):
                    flat = {'val_loss': eval_results}
                else:
                    flat = flatten_dict(eval_results)
                self.callback_metrics.update(flat)
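
Note that in the using_eval_result branch above, plain assignment inside the loop means only the last result's callback_metrics survive, whereas the non-Result branch merges each flat dict with update(). A small demonstration of the difference:

    results = [{'val_loss': 0.4}, {'val_acc': 0.9}]

    # assignment in a loop keeps only the last dict:
    metrics = {}
    for r in results:
        metrics = r
    print(metrics)  # {'val_acc': 0.9}

    # update() merges across all results:
    metrics = {}
    for r in results:
        metrics.update(r)
    print(metrics)  # {'val_loss': 0.4, 'val_acc': 0.9}
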
Example #5
    def on_evaluation_epoch_end(self, eval_results, using_eval_result):
        if using_eval_result:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    self.trainer.logger_connector.callback_metrics = eval_result.callback_metrics
            else:
                self.trainer.logger_connector.callback_metrics = eval_results.callback_metrics
        else:
            if isinstance(eval_results, list):
                for eval_result in eval_results:
                    # with a scalar return, auto set it to "val_loss" for callbacks
                    if isinstance(eval_result, torch.Tensor):
                        flat = {'val_loss': eval_result}
                    else:
                        flat = flatten_dict(eval_result)
                    self.trainer.logger_connector.callback_metrics.update(flat)
            else:
                # with a scalar return, auto set it to "val_loss" for callbacks
                if isinstance(eval_results, torch.Tensor):
                    flat = {'val_loss': eval_results}
                else:
                    flat = flatten_dict(eval_results)
                self.trainer.logger_connector.callback_metrics.update(flat)
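
A rough end-to-end sketch of the non-Result path feeding the connector, with SimpleNamespace standing in for the trainer wiring (all scaffolding here is hypothetical; flatten_dict is skipped by assuming the dict return is already flat):

    import torch
    from types import SimpleNamespace

    connector = SimpleNamespace(callback_metrics={})

    # a scalar return becomes 'val_loss'; a dict return is used as-is
    for eval_results in (torch.tensor(0.2), {'val_acc': torch.tensor(0.9)}):
        if isinstance(eval_results, torch.Tensor):
            flat = {'val_loss': eval_results}
        else:
            flat = eval_results  # assumed already flat; see Example #1
        connector.callback_metrics.update(flat)

    print(connector.callback_metrics)  # both keys land on the connector
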