def call(self, trial_split: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         context: 'ExecutionContext', is_finished_and_fitted: bool):
    """
    Compute the train and validation scores for the current epoch and record
    both on the trial split under ``self.name``.

    The metric function receives ``(expected_outputs, data_inputs)``; when
    ``self.pass_context_to_metric_function`` is set, the execution context is
    forwarded to it as the ``context`` keyword argument.

    :return: ``False`` — this callback never requests early stopping.
    """
    # Forward the context to the metric function only when configured to.
    extra_kwargs = {'context': context} if self.pass_context_to_metric_function else {}

    train_score = self.metric_function(
        pred_train.expected_outputs, pred_train.data_inputs, **extra_kwargs)
    validation_score = self.metric_function(
        pred_val.expected_outputs, pred_val.data_inputs, **extra_kwargs)

    # Both recordings share everything except the score itself.
    shared_kwargs = dict(
        name=self.name,
        higher_score_is_better=self.higher_score_is_better,
        log_metric=self.log_metrics,
    )
    trial_split.add_metric_results_train(score=train_score, **shared_kwargs)
    trial_split.add_metric_results_validation(score=validation_score, **shared_kwargs)

    return False
def call(self, trial_split: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         context: 'ExecutionContext', is_finished_and_fitted: bool):
    """
    Decide whether training should stop early for lack of improvement.

    Reads the validation score history (for ``self.metric_name``, or the
    split's main metric when it is ``None``) and stops when none of the last
    ``self.n_epochs_without_improvement`` epochs improved on the score
    recorded just before that window.

    :return: ``True`` to stop training early, ``False`` to continue.
    """
    if self.metric_name is None:
        validation_scores = trial_split.get_validation_scores()
    else:
        validation_scores = trial_split.get_metric_validation_results(self.metric_name)

    patience = self.n_epochs_without_improvement
    if len(validation_scores) > patience:
        # BUGFIX: the reference score must come from *before* the patience
        # window. The previous code used validation_scores[-patience], which
        # is the first element of validation_scores[-patience:] itself, so the
        # comparison included the reference against itself; with patience == 1
        # it compared the last score to itself and always stopped. The index
        # -(patience + 1) is valid because of the len > patience guard above.
        reference = validation_scores[-(patience + 1)]
        window = validation_scores[-patience:]
        if trial_split.is_higher_score_better():
            if all(reference >= score for score in window):
                return True
        else:
            if all(reference <= score for score in window):
                return True
    return False
def call(self, trial: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         is_finished_and_fitted: bool):
    """
    Stateful early stopping: count consecutive epochs whose validation score
    did not improve on the previous one, and stop once the count reaches
    ``self.n_epochs_without_improvement``.

    A last score of exactly 0 short-circuits without touching the counter.
    The counter is reset to 0 whenever early stopping triggers.

    :return: ``True`` to stop training early, ``False`` to continue.
    """
    scores = trial.get_validation_scores()
    if len(scores) <= self.n_epochs_without_improvement:
        return False

    higher_is_better = trial.is_higher_score_better()
    if scores[-1] == 0:
        # Deliberate short-circuit on a zero score; counter is left untouched.
        return False

    previous, latest = scores[-2], scores[-1]
    improved = (latest > previous) if higher_is_better else (latest < previous)
    self.epochs_without_improvement = \
        0 if improved else self.epochs_without_improvement + 1

    if self.epochs_without_improvement == self.n_epochs_without_improvement:
        self.epochs_without_improvement = 0
        return True
    return False
def call(self, trial: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         is_finished_and_fitted: bool):
    """
    Score the epoch's train and validation predictions with
    ``self.metric_function`` and record both results on the trial, optionally
    printing them via ``self.print_function``.

    :return: ``False`` — this callback never requests early stopping.
    """
    train_score = self.metric_function(
        pred_train.expected_outputs, pred_train.data_inputs)
    validation_score = self.metric_function(
        pred_val.expected_outputs, pred_val.data_inputs)

    trial.add_metric_results_train(
        name=self.name,
        score=train_score,
        higher_score_is_better=self.higher_score_is_better,
    )
    trial.add_metric_results_validation(
        name=self.name,
        score=validation_score,
        higher_score_is_better=self.higher_score_is_better,
    )

    if self.print_metrics:
        self.print_function('{} train: {}'.format(self.name, train_score))
        self.print_function('{} validation: {}'.format(self.name, validation_score))

    return False
def fit_trial_split(self, trial_split: 'TrialSplit', train_data_container: 'DataContainer',
                    validation_data_container: 'DataContainer',
                    context: 'ExecutionContext') -> 'TrialSplit':
    """
    Train pipeline using the training data container.
    Track training, and validation metrics for each epoch.

    :param train_data_container: train data container
    :param validation_data_container: validation data container
    :param trial_split: trial to execute
    :param context: execution context
    :return: executed trial
    """
    for epoch in range(self.epochs):
        context.logger.info('epoch {}/{}'.format(epoch + 1, self.epochs))

        # One training pass, then score both splits in the VALIDATION phase.
        train_context = context.copy().set_execution_phase(ExecutionPhase.TRAIN)
        trial_split = trial_split.fit_trial_split(
            train_data_container.copy(), train_context)

        y_pred_train = trial_split.predict_with_pipeline(
            train_data_container.copy(),
            context.copy().set_execution_phase(ExecutionPhase.VALIDATION))
        y_pred_val = trial_split.predict_with_pipeline(
            validation_data_container.copy(),
            context.copy().set_execution_phase(ExecutionPhase.VALIDATION))

        should_stop = self.callbacks.call(
            trial_split=trial_split,
            epoch_number=epoch,
            total_epochs=self.epochs,
            input_train=train_data_container,
            pred_train=y_pred_train,
            input_val=validation_data_container,
            pred_val=y_pred_val,
            context=context.copy().set_execution_phase(ExecutionPhase.VALIDATION),
            is_finished_and_fitted=False,
        )
        if should_stop:
            break

    # Saves the metrics onto the parent trial.
    trial_split.save_parent_trial()
    return trial_split
def call(self, trial: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         is_finished_and_fitted: bool):
    """
    Gate the wrapped callback behind new-best-score detection: the wrapped
    callback only runs when the trial just reached a new best score, and its
    truthiness decides whether to stop training.

    :return: ``True`` if the wrapped callback requested early stopping,
        ``False`` otherwise.
    """
    if not trial.is_new_best_score():
        return False

    delegate_result = self.wrapped_callback.call(
        trial, epoch_number, total_epochs, input_train,
        pred_train, input_val, pred_val, is_finished_and_fitted)
    return True if delegate_result else False
def fit_trial_split(self, trial_split: 'TrialSplit', train_data_container: 'DataContainer',
                    validation_data_container: 'DataContainer',
                    context: 'ExecutionContext') -> 'TrialSplit':
    """
    Train pipeline using the training data container.
    Track training, and validation metrics for each epoch.

    :param train_data_container: train data container
    :param validation_data_container: validation data container
    :param trial_split: trial to execute
    :param context: execution context
    :return: executed trial
    """
    for epoch in range(self.epochs):
        self.print_func('\nepoch {}/{}'.format(epoch + 1, self.epochs))

        # Fit one epoch, then predict on both splits for the callbacks.
        trial_split = trial_split.fit_trial_split(train_data_container.copy(), context)
        y_pred_train = trial_split.predict_with_pipeline(
            train_data_container.copy(), context)
        y_pred_val = trial_split.predict_with_pipeline(
            validation_data_container.copy(), context)

        should_stop = self.callbacks.call(
            trial=trial_split,
            epoch_number=epoch,
            total_epochs=self.epochs,
            input_train=train_data_container,
            pred_train=y_pred_train,
            input_val=validation_data_container,
            pred_val=y_pred_val,
            is_finished_and_fitted=False,  # training is still in progress
        )
        if should_stop:
            break
    return trial_split
def call(self, trial: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         is_finished_and_fitted: bool):
    """
    Persist the trial's model at the end of every epoch.

    :return: ``False`` — this callback never requests early stopping.
    """
    trial.save_model()
    return False
def call(self, trial_split: 'TrialSplit', epoch_number: int, total_epochs: int,
         input_train: 'DataContainer', pred_train: 'DataContainer',
         input_val: 'DataContainer', pred_val: 'DataContainer',
         context: 'ExecutionContext', is_finished_and_fitted: bool):
    """
    Persist the trial split's model under ``self.label`` at the end of
    every epoch.

    :return: ``False`` — this callback never requests early stopping.
    """
    trial_split.save_model(self.label)
    return False