def test_accepts_one_model(self):
    # an ensemble wrapping a single forecasting model should still fit and predict,
    # both with the default regression model and with custom ones
    regr1 = LinearRegression()
    regr2 = RandomForest(lags_future_covariates=[0])

    model0 = RegressionEnsembleModel([self.get_local_models()[0]], 10)
    model1 = RegressionEnsembleModel([self.get_local_models()[0]], 10, regr1)
    model2 = RegressionEnsembleModel([self.get_local_models()[0]], 10, regr2)

    models = [model0, model1, model2]
    for model in models:
        model.fit(series=self.combined)
        model.predict(10)
def test_accepts_different_regression_models(self):
    regr1 = LinearRegression()
    regr2 = RandomForestRegressor()
    regr3 = RandomForest(lags_future_covariates=[0])

    model0 = RegressionEnsembleModel(self.get_local_models(), 10)
    model1 = RegressionEnsembleModel(self.get_local_models(), 10, regr1)
    model2 = RegressionEnsembleModel(self.get_local_models(), 10, regr2)
    model3 = RegressionEnsembleModel(self.get_local_models(), 10, regr3)

    models = [model0, model1, model2, model3]
    for model in models:
        model.fit(series=self.combined)
        model.predict(10)
def test_ensemble_models_denoising_multi_input(self):
    # for every model, test whether it correctly denoises ts_sum_2 using
    # ts_random_multi and ts_sum_2 as inputs
    # WARNING: this test isn't numerically stable, changing self.RANDOM_SEED
    # can lead to exploding coefficients
    horizon = 10
    _, _, ts_sum2, ts_cov2 = self.denoising_input()
    torch.manual_seed(self.RANDOM_SEED)

    ensemble_models = [
        RNNModel(
            input_chunk_length=20,
            output_chunk_length=horizon,
            n_epochs=1,
            random_state=self.RANDOM_SEED,
        ),
        BlockRNNModel(
            input_chunk_length=20,
            output_chunk_length=horizon,
            n_epochs=1,
            random_state=self.RANDOM_SEED,
        ),
        RegressionModel(lags_past_covariates=[-1]),
        RegressionModel(lags_past_covariates=[-1]),
    ]

    ensemble = RegressionEnsembleModel(ensemble_models, horizon)
    self.helper_test_models_accuracy(ensemble, horizon, ts_sum2, ts_cov2, 3)
def test_train_predict_global_models_multivar_with_covariates(self):
    ensemble_models = self.get_global_models(output_chunk_length=10)
    ensemble_models.append(RegressionModel(lags=1, lags_past_covariates=[-1]))

    ensemble = RegressionEnsembleModel(ensemble_models, 10)
    ensemble.fit(self.seq1, self.cov1)
    ensemble.predict(10, self.seq2, self.cov2)
def test_torch_models_retrain(self):
    # the torch model inside the ensemble should be retrained on the full series
    # after the regression model has been fitted: its forecast must match that of
    # an identical model trained directly on the full series
    model1 = BlockRNNModel(
        input_chunk_length=12, output_chunk_length=1, random_state=0, n_epochs=2
    )
    model2 = BlockRNNModel(
        input_chunk_length=12, output_chunk_length=1, random_state=0, n_epochs=2
    )

    ensemble = RegressionEnsembleModel([model1], 5)
    ensemble.fit(self.combined)

    model1_fitted = ensemble.models[0]
    forecast1 = model1_fitted.predict(10)

    model2.fit(self.combined)
    forecast2 = model2.predict(10)

    self.assertAlmostEqual(
        sum(forecast1.values() - forecast2.values())[0], 0.0, places=2
    )
def test_train_predict_global_models_univar(self):
    ensemble_models = self.get_global_models(output_chunk_length=10)
    ensemble_models.append(RegressionModel(lags=1))

    ensemble = RegressionEnsembleModel(ensemble_models, 10)
    ensemble.fit(series=self.combined)
    ensemble.predict(10)
def test_train_n_points(self):
    regr = LinearRegressionModel(lags_future_covariates=[0])

    # same values
    ensemble = RegressionEnsembleModel(self.get_local_models(), 5, regr)

    # too big value to perform the split
    ensemble = RegressionEnsembleModel(self.get_local_models(), 100)
    with self.assertRaises(ValueError):
        ensemble.fit(self.combined)

    ensemble = RegressionEnsembleModel(self.get_local_models(), 50)
    with self.assertRaises(ValueError):
        ensemble.fit(self.combined)

    # too big value considering min_train_series_length
    ensemble = RegressionEnsembleModel(self.get_local_models(), 45)
    with self.assertRaises(ValueError):
        ensemble.fit(self.combined)