Example #1
    def _init_metrics(self):
        """ Start up the metrics and statistics watchers. One pair of
        watchers (global mean and sliding window) is created for each
        of the learners to be evaluated.
        """
        self.mean_eval_measurements = []
        self.current_eval_measurements = []

        if self._task_type == constants.CLASSIFICATION:
            for _ in range(self.n_models):
                self.mean_eval_measurements.append(
                    ClassificationMeasurements())
                self.current_eval_measurements.append(
                    WindowClassificationMeasurements(
                        window_size=self.n_sliding))

        elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
            for _ in range(self.n_models):
                self.mean_eval_measurements.append(
                    MultiTargetClassificationMeasurements())
                self.current_eval_measurements.append(
                    WindowMultiTargetClassificationMeasurements(
                        window_size=self.n_sliding))

        elif self._task_type == constants.REGRESSION:
            for _ in range(self.n_models):
                self.mean_eval_measurements.append(RegressionMeasurements())
                self.current_eval_measurements.append(
                    WindowRegressionMeasurements(window_size=self.n_sliding))

        elif self._task_type == constants.MULTI_TARGET_REGRESSION:
            for _ in range(self.n_models):
                self.mean_eval_measurements.append(
                    MultiTargetRegressionMeasurements())
                self.current_eval_measurements.append(
                    WindowMultiTargetRegressionMeasurements(
                        window_size=self.n_sliding))

        # Running time watchers, one per model
        self.running_time_measurements = []
        for _ in range(self.n_models):
            self.running_time_measurements.append(RunningTimeMeasurements())

        # Evaluation data buffer
        self._data_dict = {}
        for metric in self.metrics:
            # Default series: the running MEAN and the sliding-window
            # CURRENT value; the metrics below buffer raw data instead.
            data_ids = [constants.MEAN, constants.CURRENT]
            if metric == constants.TRUE_VS_PREDICTED:
                data_ids = [constants.Y_TRUE, constants.Y_PRED]
            elif metric == constants.DATA_POINTS:
                data_ids = ['X', 'target_values', 'prediction']
            elif metric == constants.RUNNING_TIME:
                data_ids = [
                    'training_time', 'testing_time', 'total_running_time'
                ]
            elif metric == constants.MODEL_SIZE:
                data_ids = ['model_size']
            self._data_dict[metric] = data_ids

        self._data_buffer = EvaluationDataBuffer(data_dict=self._data_dict)
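
The watcher pairs built above are meant to be updated in lockstep on every test-then-train step: the mean tracker accumulates results since the start of the stream, while the current tracker only reflects the last n_sliding samples. Below is a minimal, self-contained sketch of that pattern, assuming scikit-multiflow's ClassificationMeasurements / WindowClassificationMeasurements API (add_result and get_accuracy); the toy stream and window size are made up for illustration.

# Minimal sketch: one global and one sliding-window tracker per model,
# fed together as the evaluation loop does with the lists built in
# _init_metrics. Assumes scikit-multiflow's public metrics API.
from skmultiflow.metrics import (ClassificationMeasurements,
                                 WindowClassificationMeasurements)

mean_tracker = ClassificationMeasurements()
current_tracker = WindowClassificationMeasurements(window_size=5)

stream = [(1, 1), (0, 1), (1, 1), (0, 0), (1, 0), (1, 1)]  # (y_true, y_pred)
for y_true, y_pred in stream:
    mean_tracker.add_result(y_true, y_pred)     # since the start of the stream
    current_tracker.add_result(y_true, y_pred)  # last 5 samples only

print(mean_tracker.get_accuracy())     # accuracy over all 6 samples
print(current_tracker.get_accuracy())  # accuracy over the sliding window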

import numpy as np
# Import path assumed from scikit-multiflow's public metrics module.
from skmultiflow.metrics import MultiTargetRegressionMeasurements


def test_multi_target_regression_measurements():
    y_true = np.zeros((100, 3))
    y_pred = np.zeros((100, 3))

    for t in range(3):
        y_true[:, t] = np.sin(range(100))
        y_pred[:, t] = np.sin(range(100)) + (t + 1) * .05

    measurements = MultiTargetRegressionMeasurements()
    for i in range(100):
        measurements.add_result(y_true[i], y_pred[i])

    expected_amse = 0.011666666666666664
    assert np.isclose(expected_amse,
                      measurements.get_average_mean_square_error())

    expected_aae = 0.09999999999999999
    assert np.isclose(expected_aae, measurements.get_average_absolute_error())

    expected_armse = 0.09999999999999999
    assert np.isclose(expected_armse,
                      measurements.get_average_root_mean_square_error())

    expected_info = 'MultiTargetRegressionMeasurements: sample_count: 100 - ' \
                    'average_mean_square_error: {} - ' \
                    'average_mean_absolute_error: {} - ' \
                    'average_root_mean_square_error: {}'.format(
                        str(measurements.get_average_mean_square_error()),
                        str(measurements.get_average_absolute_error()),
                        str(measurements.get_average_root_mean_square_error())
                    )
    assert expected_info == measurements.get_info()

    expected_last = (np.array([-0.99920683, -0.99920683, -0.99920683]),
                     np.array([-0.94920683, -0.89920683, -0.84920683]))
    for exp, obs in zip(expected_last, measurements.get_last()):
        assert np.isclose(exp, obs).all()

    measurements.reset()
    assert measurements.sample_count == 0
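
The expected constants above can be checked by hand: target t carries a constant offset of 0.05 * (t + 1), so each target's MAE and RMSE equal its offset and its MSE equals the offset squared, and the "average" metrics are simply means over the three targets. A short verification sketch in plain NumPy:

import numpy as np

offsets = np.array([0.05, 0.10, 0.15])  # per-target constant error
print(np.mean(offsets ** 2))  # 0.011666... -> expected_amse
print(np.mean(offsets))       # 0.1         -> expected_aae
print(np.mean(offsets))       # 0.1         -> expected_armse, since the RMSE
                              # of a constant offset is the offset itself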