Code example #1
Metric initialization in a stream evaluator (the class names suggest scikit-multiflow): for each model under evaluation, one cumulative and one sliding-window measurement watcher are created according to the task type, followed by running-time trackers and an evaluation data buffer.
    def _init_metrics(self):
        """ Starts up the metrics and statistics watchers. One watcher is created
        for each of the learners to be evaluated.

        """
        self.mean_eval_measurements = []
        self.current_eval_measurements = []

        if self._task_type == constants.CLASSIFICATION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(
                    ClassificationMeasurements())
                self.current_eval_measurements.append(
                    WindowClassificationMeasurements(
                        window_size=self.n_sliding))

        elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(
                    MultiTargetClassificationMeasurements())
                self.current_eval_measurements.append(
                    WindowMultiTargetClassificationMeasurements(
                        window_size=self.n_sliding))

        elif self._task_type == constants.REGRESSION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(RegressionMeasurements())
                self.current_eval_measurements.append(
                    WindowRegressionMeasurements(window_size=self.n_sliding))

        elif self._task_type == constants.MULTI_TARGET_REGRESSION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(
                    MultiTargetRegressionMeasurements())
                self.current_eval_measurements.append(
                    WindowMultiTargetRegressionMeasurements(
                        window_size=self.n_sliding))

        # Running time
        self.running_time_measurements = []
        for i in range(self.n_models):
            self.running_time_measurements.append(RunningTimeMeasurements())

        # Evaluation data buffer
        self._data_dict = {}
        for metric in self.metrics:
            data_ids = [constants.MEAN, constants.CURRENT]
            if metric == constants.TRUE_VS_PREDICTED:
                data_ids = [constants.Y_TRUE, constants.Y_PRED]
            elif metric == constants.DATA_POINTS:
                data_ids = ['X', 'target_values', 'prediction']
            elif metric == constants.RUNNING_TIME:
                data_ids = [
                    'training_time', 'testing_time', 'total_running_time'
                ]
            elif metric == constants.MODEL_SIZE:
                data_ids = ['model_size']
            self._data_dict[metric] = data_ids

        self._data_buffer = EvaluationDataBuffer(data_dict=self._data_dict)
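
To make the mean/current pairing concrete, here is a minimal standalone sketch. It assumes the measurement classes are importable from skmultiflow.metrics; the add_result and get_mean_square_error calls mirror the test in code example #3, and the window size of 20 and the synthetic data are arbitrary.

import numpy as np
from skmultiflow.metrics import (RegressionMeasurements,
                                 WindowRegressionMeasurements)

# One cumulative watcher and one sliding-window watcher, fed identically,
# as _init_metrics does for each model under evaluation.
mean_watcher = RegressionMeasurements()
current_watcher = WindowRegressionMeasurements(window_size=20)

y_true = np.sin(np.arange(200))
y_pred = y_true + 0.05

for yt, yp in zip(y_true, y_pred):
    mean_watcher.add_result(yt, yp)     # statistics over the whole stream
    current_watcher.add_result(yt, yp)  # statistics over the last 20 samples

print(mean_watcher.get_mean_square_error())     # cumulative MSE
print(current_watcher.get_mean_square_error())  # sliding-window MSE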
Code example #2
A variant of the same _init_metrics method, apparently from a different version of the code: it covers classification, multi-output, and regression tasks and stops before the running-time and data-buffer setup.
    def _init_metrics(self):
        """ Starts up the metrics and statistics watchers. One watcher is created
        for each of the learners to be evaluated.

        """
        self.mean_eval_measurements = []
        self.current_eval_measurements = []

        if self._task_type == constants.CLASSIFICATION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(
                    ClassificationMeasurements())
                self.current_eval_measurements.append(
                    WindowClassificationMeasurements(
                        window_size=self.n_sliding))

        elif self._task_type == constants.MULTI_OUTPUT:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(MultiOutputMeasurements())
                self.current_eval_measurements.append(
                    WindowMultiOutputMeasurements(window_size=self.n_sliding))

        elif self._task_type == constants.REGRESSION:
            for i in range(self.n_models):
                self.mean_eval_measurements.append(RegressionMeasurements())
                self.current_eval_measurements.append(
                    WindowRegressionMeasurements(window_size=self.n_sliding))
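
In both versions the if/elif chain is a task-type dispatch: each branch pairs a cumulative measurement class with its windowed counterpart. A hypothetical table-driven sketch of the same pattern follows; this is not the library's actual code, and the skmultiflow.utils.constants import path is an assumption.

from skmultiflow.metrics import (ClassificationMeasurements,
                                 WindowClassificationMeasurements,
                                 RegressionMeasurements,
                                 WindowRegressionMeasurements)
from skmultiflow.utils import constants

# Each task type maps to a (cumulative, windowed) pair of watcher factories.
WATCHER_FACTORIES = {
    constants.CLASSIFICATION: (ClassificationMeasurements,
                               WindowClassificationMeasurements),
    constants.REGRESSION: (RegressionMeasurements,
                           WindowRegressionMeasurements),
}

def init_metrics(task_type, n_models, n_sliding):
    mean_cls, window_cls = WATCHER_FACTORIES[task_type]
    mean = [mean_cls() for _ in range(n_models)]
    current = [window_cls(window_size=n_sliding) for _ in range(n_models)]
    return mean, current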
Code example #3
A unit test exercising the RegressionMeasurements API end to end: add_result, the error getters, get_info, get_last, and reset.
import numpy as np

from skmultiflow.metrics import RegressionMeasurements


def test_regression_measurements():
    y_true = np.sin(range(100))
    y_pred = np.sin(range(100)) + .05

    measurements = RegressionMeasurements()
    for i in range(len(y_true)):
        measurements.add_result(y_true[i], y_pred[i])

    expected_mse = 0.0025000000000000022
    assert np.isclose(expected_mse, measurements.get_mean_square_error())

    expected_ae = 0.049999999999999906
    assert np.isclose(expected_ae, measurements.get_average_error())

    expected_info = 'RegressionMeasurements: - sample_count: 100 - mean_square_error: 0.002500 ' \
                    '- mean_absolute_error: 0.050000'
    assert expected_info == measurements.get_info()

    expected_last = (-0.9992068341863537, -0.9492068341863537)
    assert np.all(expected_last == measurements.get_last())

    measurements.reset()
    assert measurements.sample_count == 0

Code example #4
The constructor of a streaming random-patches regressor ensemble (the base-learner class it assigns suggests scikit-multiflow's StreamingRandomPatchesRegressor): it installs a RegressionMeasurements instance as the per-member performance evaluator and validates the aggregation and drift-detection options.

    def __init__(self, base_estimator=HoeffdingTreeRegressor(grace_period=50,
                                                             split_confidence=0.01,
                                                             random_state=1),
                 n_estimators: int = 100,
                 subspace_mode: str = "percentage",
                 subspace_size: int = 60,
                 training_method: str = "randompatches",
                 lam: float = 6.0,
                 drift_detection_method: BaseDriftDetector = ADWIN(delta=1e-5),
                 warning_detection_method: BaseDriftDetector = ADWIN(delta=1e-4),
                 disable_weighted_vote: bool = True,
                 disable_drift_detection: bool = False,
                 disable_background_learner: bool = False,
                 drift_detection_criteria='error',
                 aggregation_method='mean',
                 nominal_attributes=None,
                 random_state=None):

        super().__init__(base_estimator=base_estimator,
                         n_estimators=n_estimators,
                         subspace_mode=subspace_mode,
                         subspace_size=subspace_size,
                         training_method=training_method,
                         lam=lam,
                         drift_detection_method=drift_detection_method,
                         warning_detection_method=warning_detection_method,
                         disable_weighted_vote=disable_weighted_vote,
                         disable_drift_detection=disable_drift_detection,
                         disable_background_learner=disable_background_learner,
                         nominal_attributes=nominal_attributes,
                         random_state=random_state)
        self._base_performance_evaluator = RegressionMeasurements()
        self._base_learner_class = StreamingRandomPatchesRegressorBaseLearner

        if aggregation_method not in {self._MEAN, self._MEDIAN}:
            raise ValueError("Invalid aggregation_method: {}.\n"
                             "Valid options are: {}".format(aggregation_method,
                                                            {self._MEAN, self._MEDIAN}))
        self.aggregation_method = aggregation_method

        if drift_detection_criteria not in {self._ERROR, self._PREDICTION}:
            raise ValueError("Invalid drift_detection_criteria: {}.\n"
                             "Valid options are: {}".format(drift_detection_criteria,
                                                            {self._ERROR, self._PREDICTION}))
        self.drift_detection_criteria = drift_detection_criteria
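
A usage sketch for the constructor above, assuming it belongs to scikit-multiflow's StreamingRandomPatchesRegressor and that the _MEDIAN option is spelled 'median'; the generator, sample counts, and n_estimators are arbitrary choices for illustration.

from skmultiflow.data import RegressionGenerator
from skmultiflow.meta import StreamingRandomPatchesRegressor

stream = RegressionGenerator(random_state=1)
model = StreamingRandomPatchesRegressor(n_estimators=10,
                                        aggregation_method='median',
                                        random_state=1)

# Warm up on an initial batch so the ensemble has trained members.
X, y = stream.next_sample(50)
model.partial_fit(X, y)

# Prequential loop: predict on each new sample, then train on it.
for _ in range(200):
    X, y = stream.next_sample()
    y_hat = model.predict(X)
    model.partial_fit(X, y)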