def test_R2Score(generated_data):
    """R2Score must agree across batching schemes and separate good fits from bad."""
    metric = R2Score()

    # Before any update the score is undefined (NaN).
    assert np.isnan(metric.result().numpy())

    # Entire dataset in a single update.
    metric.update_state(generated_data.y_true, generated_data.y_good)
    score_one_pass = metric.result().numpy()
    assert np.isreal(score_one_pass)

    # Same data streamed as two chunks.
    metric.reset_states()
    metric.update_state(
        generated_data.y_true[:SPLIT_POINT], generated_data.y_good[:SPLIT_POINT]
    )
    metric.update_state(
        generated_data.y_true[SPLIT_POINT:], generated_data.y_good[SPLIT_POINT:]
    )
    score_two_pass = metric.result().numpy()
    assert np.isreal(score_two_pass)

    # Batching must not change the aggregate score.
    assert np.isclose(score_one_pass, score_two_pass)

    # Near-perfect predictions should score very close to 1.
    assert np.abs(1.0 - score_two_pass) < 1e-3

    # Poor predictions should land far below 1.
    metric.reset_states()
    metric.update_state(
        generated_data.y_true[:SPLIT_POINT], generated_data.y_bad[:SPLIT_POINT]
    )
    metric.update_state(
        generated_data.y_true[SPLIT_POINT:], generated_data.y_bad[SPLIT_POINT:]
    )
    assert metric.result().numpy() < 0.05
def _setup_metrics(self):
    """Register the evaluation metrics for this feature, keyed by metric name.

    Includes the configured eval loss plus error, MSE, MAE, and R2 metrics.
    """
    # Assign a fresh dict so we shadow the class-level attribute instead of
    # mutating state shared across instances (consistent with the other
    # _setup_metrics implementations in this file).
    self.metric_functions = {}
    self.metric_functions[LOSS] = self.eval_loss_function
    self.metric_functions[ERROR] = ErrorScore(name='metric_error')
    self.metric_functions[MEAN_SQUARED_ERROR] = MeanSquaredErrorMetric(
        name='metric_mse')
    self.metric_functions[MEAN_ABSOLUTE_ERROR] = MeanAbsoluteErrorMetric(
        name='metric_mae')
    self.metric_functions[R2] = R2Score(name='metric_r2')
def _setup_metrics(self):
    """Build the per-instance registry of evaluation metrics."""
    # Fresh dict shadows the class-level attribute of the same name.
    registry = {
        LOSS: self.eval_loss_function,
        MEAN_SQUARED_ERROR: MeanSquaredErrorMetric(name='metric_mse'),
        MEAN_ABSOLUTE_ERROR: MeanAbsoluteErrorMetric(name='metric_mae'),
        R2: R2Score(name='metric_r2'),
    }
    self.metric_functions = registry
def _setup_metrics(self):
    """Register evaluation metrics, choosing the eval loss from the loss config."""
    # Fresh dict shadows the class-level attribute of the same name.
    self.metric_functions = {}
    metrics = self.metric_functions

    # Pick the eval-loss metric matching the configured training loss;
    # anything other than MSE falls back to MAE, as before.
    metrics[LOSS] = (
        MSEMetric(name='eval_loss')
        if self.loss[TYPE] == 'mean_squared_error'
        else MAEMetric(name='eval_loss')
    )

    metrics[ERROR] = ErrorScore(name='metric_error')
    metrics[MEAN_SQUARED_ERROR] = MeanSquaredErrorMetric(name='metric_mse')
    metrics[MEAN_ABSOLUTE_ERROR] = MeanAbsoluteErrorMetric(name='metric_mae')
    metrics[R2] = R2Score(name='metric_r2')
def _setup_metrics(self):
    """Register evaluation metrics; eval loss is dispatched from the loss type."""
    # Fresh dict shadows the class-level attribute of the same name.
    self.metric_functions = {}

    # Map each supported loss type to its eval-loss metric class.
    loss_metric_by_type = {
        "mean_squared_error": MSEMetric,
        "mean_absolute_error": MAEMetric,
        "root_mean_squared_error": RMSEMetric,
        "root_mean_squared_percentage_error": RMSPEMetric,
    }
    loss_metric_cls = loss_metric_by_type.get(self.loss[TYPE])
    # As in the original if/elif chain, an unrecognized loss type leaves
    # the LOSS entry unset.
    if loss_metric_cls is not None:
        self.metric_functions[LOSS] = loss_metric_cls(name="eval_loss")

    self.metric_functions[MEAN_SQUARED_ERROR] = MeanSquaredErrorMetric(
        name="metric_mse"
    )
    self.metric_functions[MEAN_ABSOLUTE_ERROR] = MeanAbsoluteErrorMetric(
        name="metric_mae"
    )
    self.metric_functions[ROOT_MEAN_SQUARED_ERROR] = RootMeanSquaredErrorMetric(
        name="metric_rmse"
    )
    self.metric_functions[ROOT_MEAN_SQUARED_PERCENTAGE_ERROR] = RMSPEMetric(
        name="metric_rmspe"
    )
    self.metric_functions[R2] = R2Score(name="metric_r2")