def _init_metrics(self):
    """Set up the metric and statistics watchers for every learner.

    For each of the ``self.n_models`` learners this creates one cumulative
    watcher (appended to ``self.mean_eval_measurements``) and one
    sliding-window watcher (appended to ``self.current_eval_measurements``),
    selected according to ``self._task_type``. It also creates one
    running-time tracker per learner and initializes the shared
    evaluation data buffer from ``self.metrics``.
    """
    # (cumulative watcher class, windowed watcher class) per task type.
    watcher_classes = {
        constants.CLASSIFICATION: (
            ClassificationMeasurements,
            WindowClassificationMeasurements),
        constants.MULTI_TARGET_CLASSIFICATION: (
            MultiTargetClassificationMeasurements,
            WindowMultiTargetClassificationMeasurements),
        constants.REGRESSION: (
            RegressionMeasurements,
            WindowRegressionMeasurements),
        constants.MULTI_TARGET_REGRESSION: (
            MultiTargetRegressionMeasurements,
            WindowMultiTargetRegressionMeasurements),
    }
    self.mean_eval_measurements = []
    self.current_eval_measurements = []
    if self._task_type in watcher_classes:
        mean_cls, window_cls = watcher_classes[self._task_type]
        for _ in range(self.n_models):
            self.mean_eval_measurements.append(mean_cls())
            self.current_eval_measurements.append(
                window_cls(window_size=self.n_sliding))

    # Running time: one tracker per learner regardless of task type.
    self.running_time_measurements = [
        RunningTimeMeasurements() for _ in range(self.n_models)
    ]

    # Evaluation data buffer: metrics not listed here use the default
    # (mean, current) pair of data identifiers.
    special_data_ids = {
        constants.TRUE_VS_PREDICTED: [constants.Y_TRUE, constants.Y_PRED],
        constants.DATA_POINTS: ['X', 'target_values', 'prediction'],
        constants.RUNNING_TIME: [
            'training_time', 'testing_time', 'total_running_time'
        ],
        constants.MODEL_SIZE: ['model_size'],
    }
    self._data_dict = {}
    for metric in self.metrics:
        self._data_dict[metric] = special_data_ids.get(
            metric, [constants.MEAN, constants.CURRENT])
    self._data_buffer = EvaluationDataBuffer(data_dict=self._data_dict)
def _init_metrics(self):
    """Set up the metric and statistics watchers.

    One cumulative watcher and one sliding-window watcher is created for
    each of the ``self.n_models`` learners to be evaluated, chosen
    according to ``self._task_type``. Unknown task types leave both
    watcher lists empty, matching the original if/elif fall-through.
    """
    self.mean_eval_measurements = []
    self.current_eval_measurements = []

    # Pick the (cumulative, windowed) watcher classes for this task.
    if self._task_type == constants.CLASSIFICATION:
        selected = (ClassificationMeasurements,
                    WindowClassificationMeasurements)
    elif self._task_type == constants.MULTI_OUTPUT:
        selected = (MultiOutputMeasurements,
                    WindowMultiOutputMeasurements)
    elif self._task_type == constants.REGRESSION:
        selected = (RegressionMeasurements,
                    WindowRegressionMeasurements)
    else:
        selected = None

    if selected is not None:
        mean_cls, window_cls = selected
        for _ in range(self.n_models):
            self.mean_eval_measurements.append(mean_cls())
            self.current_eval_measurements.append(
                window_cls(window_size=self.n_sliding))
def test_window_regression_measurements():
    """Exercise WindowRegressionMeasurements on a 20-sample sliding window.

    Feeds 100 sine-wave targets with predictions offset by a constant
    +0.05, then checks the windowed MSE, MAE, info string, last
    (true, predicted) pair, and that reset() clears the sample count.
    """
    n_samples = 100
    targets = np.sin(range(n_samples))
    predictions = np.sin(range(n_samples)) + .05

    measurements = WindowRegressionMeasurements(window_size=20)
    for truth, guess in zip(targets, predictions):
        measurements.add_result(truth, guess)

    # Constant +0.05 offset -> MSE is 0.05**2, MAE is 0.05 (float noise aside).
    expected_mse = 0.0025000000000000022
    assert np.isclose(expected_mse, measurements.get_mean_square_error())

    expected_ae = 0.050000000000000024
    assert np.isclose(expected_ae, measurements.get_average_error())

    expected_info = 'WindowRegressionMeasurements: - sample_count: 20 - mean_square_error: 0.002500 ' \
                    '- mean_absolute_error: 0.050000'
    assert expected_info == measurements.get_info()

    expected_last = (-0.9992068341863537, -0.9492068341863537)
    assert np.alltrue(expected_last == measurements.get_last())

    measurements.reset()
    assert measurements.sample_count == 0