def call(
    self,
    trial_split: TrialSplit,
    epoch_number: int,
    total_epochs: int,
    input_train: DataContainer,
    pred_train: DataContainer,
    input_val: DataContainer,
    pred_val: DataContainer,
    context: ExecutionContext,
    is_finished_and_fitted: bool,
):
    """Decide whether training should stop early for this trial split.

    Returns True when the validation score recorded
    ``self.n_epochs_without_improvement`` epochs ago is at least as good as
    every score observed since then (i.e. no improvement inside the window),
    and False otherwise. Uses ``self.metric_name`` when set, else the split's
    main validation scores.
    """
    if self.metric_name is None:
        scores = trial_split.get_validation_scores()
    else:
        scores = trial_split.get_metric_validation_results(self.metric_name)

    window = self.n_epochs_without_improvement
    if len(scores) <= window:
        # Not enough history yet to judge stagnation.
        return False

    recent = scores[-window:]
    reference = recent[0]  # the score from `window` epochs ago
    if trial_split.is_higher_score_better():
        # Higher is better: stagnation means nothing beat the reference.
        return all(reference >= score for score in recent)
    # Lower is better: stagnation means nothing dropped below the reference.
    return all(reference <= score for score in recent)
def call(
    self,
    trial: TrialSplit,
    epoch_number: int,
    total_epochs: int,
    input_train: DataContainer,
    pred_train: DataContainer,
    input_val: DataContainer,
    pred_val: DataContainer,
    is_finished_and_fitted: bool,
):
    """Stateful early-stopping check using a no-improvement counter.

    Increments ``self.epochs_without_improvement`` each epoch whose latest
    validation score fails to improve on the previous one, resets the counter
    on any improvement, and returns True (stop) once the counter reaches
    ``self.n_epochs_without_improvement``; the counter is zeroed again at
    that point. Returns False otherwise.
    """
    scores = trial.get_validation_scores()
    if len(scores) <= self.n_epochs_without_improvement:
        # NOTE(review): this guard also delays the counter until the window is
        # full, so stopping can take up to ~2x the patience — confirm intended.
        return False

    higher_is_better = trial.is_higher_score_better()
    if scores[-1] == 0:
        # A zero score skips the bookkeeping entirely.
        # NOTE(review): presumably a sentinel for a not-yet-computed metric —
        # verify against how scores are produced.
        return False

    previous, latest = scores[-2], scores[-1]
    stalled = (previous >= latest) if higher_is_better else (previous <= latest)
    self.epochs_without_improvement = (
        self.epochs_without_improvement + 1 if stalled else 0
    )

    if self.epochs_without_improvement == self.n_epochs_without_improvement:
        # Patience exhausted: reset so the callback can re-arm, then stop.
        self.epochs_without_improvement = 0
        return True
    return False