Example #1
    def test_launch_model_check_default_parameters(self, partial):
        # Check default parameters.
        param_config = {
            "verbose": "no",
        }
        if partial:
            param_config["model_parameters"] = {}

        for n_threads in range(1, 7):
            param_config["max_threads"] = n_threads
            df = get_fake_df(10)
            predictor = MockUpModel(param_config)
            model_result = predictor.launch_model(df.copy(),
                                                  max_threads=n_threads)

            expected_train_set_lengths = [2, 4, 6, 8, 9]

            assert predictor.test_values == 1
            assert predictor.delta_training_values == 2
            assert predictor.main_accuracy_estimator == "mae"
            assert isinstance(predictor.transformation, Identity)

            assert len(model_result.results) == 5

            for r in model_result.results:
                prediction = r.prediction
                testing_performances = r.testing_performances
                first_used_index = testing_performances.first_used_index

                # Recover the training window: everything from the first used
                # index, minus the test_values points dropped from the end.
                used_training_set = df.loc[first_used_index:]
                used_training_set = used_training_set.iloc[:-1]
                assert len(prediction) == len(used_training_set) + 1 + 10
                expected_train_set_lengths.remove(len(used_training_set))

            assert len(expected_train_set_lengths) == 0
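
Every expected_train_set_lengths list in these tests follows the same pattern: training windows grow by delta_training_values and are clamped to whatever remains after reserving test_values points at the tail of the series. Below is a minimal sketch of that arithmetic; the helper name expected_window_lengths and the clamping rule are reconstructions inferred from the assertions, not the library's actual code.

def expected_window_lengths(total, test_values, delta):
    # Hypothetical reconstruction of the expanding-window scheme the
    # assertions imply: windows grow by `delta`, and the last one is
    # clamped to the `total - test_values` points actually available.
    max_train = total - test_values
    lengths = []
    window = delta
    while True:
        lengths.append(min(window, max_train))
        if window >= max_train:
            break
        window += delta
    return lengths

# Defaults in this test: test_values=1, delta_training_values=2, 10 points.
assert expected_window_lengths(10, 1, 2) == [2, 4, 6, 8, 9]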
Example #2
    def test_launch_model_with_extra_regressors(self):
        # Test with extra regressors.
        param_config = {
            "model_parameters": {
                "test_percentage": 10,
                "delta_training_percentage": 20,
                "prediction_lags": 10,
                "transformation": "none",
                "main_accuracy_estimator": "mae"
            },
        }

        for n_threads in range(1, 7):
            param_config["max_threads"] = n_threads
            df = get_fake_df(10)
            extra_regressor = get_fake_df(20, name="A")

            predictor = MockUpModel(param_config)
            model_result = predictor.launch_model(
                df.copy(), extra_regressors=extra_regressor)

            expected_train_set_lengths = [2, 4, 6, 8, 9]

            assert predictor.test_values == 1
            assert predictor.delta_training_values == 2
            assert predictor.main_accuracy_estimator == "mae"
            assert len(model_result.results) == 5

            assert predictor.extra_regressors_in_predict is extra_regressor
            assert predictor.extra_regressors_in_training is extra_regressor

            for r in model_result.results:
                prediction = r.prediction
                testing_performances = r.testing_performances
                first_used_index = testing_performances.first_used_index

                used_training_set = df.loc[first_used_index:]
                used_training_set = used_training_set.iloc[:-1]
                assert len(prediction) == len(used_training_set) + 1 + 10
                expected_train_set_lengths.remove(len(used_training_set))

            assert len(expected_train_set_lengths) == 0
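
These snippets also depend on a get_fake_df fixture that is not shown here. A hypothetical stand-in, consistent with how it is called above (a length plus an optional column name, as in get_fake_df(20, name="A")), could be:

import numpy as np
import pandas as pd

def get_fake_df(length, name="value"):
    # Hypothetical fixture: a daily datetime-indexed DataFrame with one
    # named column of synthetic values. The datetime index matters
    # because the tests slice with df.loc[first_used_index:].
    index = pd.date_range(start="2000-01-01", periods=length, freq="D")
    return pd.DataFrame({name: np.arange(length, dtype=float)}, index=index)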
Example #3
    def test_launch_percentages_over_100(self):
        # The percentages sum to more than 100%; the training window must be clamped.
        param_config = {
            "model_parameters": {
                "test_values": 5,
                "delta_training_percentage": 100,
                "prediction_lags": 10,
                "transformation": "none",
                "main_accuracy_estimator": "mae"
            },
        }

        for n_threads in range(1, 3):
            param_config["max_threads"] = n_threads
            df = get_fake_df(100)
            predictor = MockUpModel(param_config)
            model_result = predictor.launch_model(df.copy(),
                                                  max_threads=n_threads)

            expected_train_set_lengths = [95]

            assert predictor.test_values == 5
            assert predictor.delta_training_values == 100
            assert predictor.main_accuracy_estimator == "mae"

            assert len(model_result.results) == 1

            for r in model_result.results:
                prediction = r.prediction
                testing_performances = r.testing_performances
                first_used_index = testing_performances.first_used_index

                used_training_set = df.loc[first_used_index:]
                used_training_set = used_training_set.iloc[:-5]
                assert len(prediction) == len(used_training_set) + 5 + 10
                expected_train_set_lengths.remove(len(used_training_set))

            assert len(expected_train_set_lengths) == 0
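
This is the pure clamping case of the window arithmetic sketched after Example #1: a delta of 100% of 100 points exceeds the 95 points left after reserving test_values=5, so a single clamped window is produced. Reusing that hypothetical helper:

# One window, clamped from 100 down to 100 - 5 = 95 points.
assert expected_window_lengths(100, 5, 100) == [95]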
Example #4
    def test_launch_model_float_percentages(self):
        # Float percentages whose sum is not 100%; the windows must adapt. Uses "test_percentage".
        param_config = {
            "model_parameters": {
                "test_percentage": 11.7,
                "delta_training_percentage": 18.9,
                "prediction_lags": 10,
                "transformation": "none",
                "main_accuracy_estimator": "mae"
            },
        }

        for n_threads in range(1, 7):
            param_config["max_threads"] = n_threads
            df = get_fake_df(101)
            predictor = MockUpModel(param_config)
            model_result = predictor.launch_model(df.copy(),
                                                  max_threads=n_threads)

            expected_train_set_lengths = [19, 38, 57, 76, 89]

            assert predictor.test_values == 12
            assert predictor.delta_training_values == 19
            assert predictor.main_accuracy_estimator == "mae"

            assert len(model_result.results) == 5

            for r in model_result.results:
                prediction = r.prediction
                testing_performances = r.testing_performances
                first_used_index = testing_performances.first_used_index

                used_training_set = df.loc[first_used_index:]
                used_training_set = used_training_set.iloc[:-12]
                assert len(prediction) == len(used_training_set) + 12 + 10
                expected_train_set_lengths.remove(len(used_training_set))

            assert len(expected_train_set_lengths) == 0
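
The asserted integers are consistent with round-to-nearest conversion of the float percentages; that rounding rule is an assumption, since the test only pins the resulting values. Continuing with the hypothetical expected_window_lengths helper from Example #1:

# Assumed conversion: round-to-nearest reproduces both asserted values.
test_values = round(101 * 11.7 / 100)             # 11.817 -> 12
delta_training_values = round(101 * 18.9 / 100)   # 19.089 -> 19
assert (test_values, delta_training_values) == (12, 19)
assert expected_window_lengths(
    101, test_values, delta_training_values) == [19, 38, 57, 76, 89]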