import os
from unittest import TestCase

import numpy as np
import pandas as pd
import tensorflow as tf
from numpy.testing import assert_array_almost_equal

# NOTE: the zoo/test import paths below are assumptions based on the
# analytics-zoo project layout these tests appear to come from; adjust
# them to match your source tree.
from zoo.automl.common.metrics import Evaluator
from zoo.automl.feature.time_sequence import TimeSequenceFeatureTransformer
from zoo.automl.model.MTNet_keras import MTNetKeras
from zoo.automl.model.Seq2Seq import LSTMSeq2Seq
from zoo.chronos.model.forecast import MTNetForecaster
from test.zoo.pipeline.utils.test_utils import ZooTestCase


class TestChronosModelMTNetForecaster(TestCase):

    def setUp(self):
        tf.keras.backend.clear_session()
        self.ft = TimeSequenceFeatureTransformer()
        self.create_data()

    def tearDown(self):
        pass

    def create_data(self):
        def gen_train_sample(data, past_seq_len, future_seq_len):
            data = pd.DataFrame(data)
            x, y = self.ft._roll_train(data,
                                       past_seq_len=past_seq_len,
                                       future_seq_len=future_seq_len)
            return x, y

        def gen_test_sample(data, past_seq_len):
            test_data = pd.DataFrame(data)
            x = self.ft._roll_test(test_data, past_seq_len=past_seq_len)
            return x

        self.long_num = 6
        self.time_step = 2
        look_back = (self.long_num + 1) * self.time_step
        look_forward = 1
        self.x_train, self.y_train = gen_train_sample(
            data=np.random.randn(64, 4),
            past_seq_len=look_back,
            future_seq_len=look_forward)
        self.x_val, self.y_val = gen_train_sample(
            data=np.random.randn(16, 4),
            past_seq_len=look_back,
            future_seq_len=look_forward)
        self.x_test = gen_test_sample(data=np.random.randn(16, 4),
                                      past_seq_len=look_back)

    def test_forecast_mtnet(self):
        # TODO hacking to fix a bug
        target_dim = 1
        model = MTNetForecaster(target_dim=target_dim,
                                feature_dim=self.x_train.shape[-1],
                                long_series_num=self.long_num,
                                series_length=self.time_step)
        x_train_long, x_train_short = model.preprocess_input(self.x_train)
        x_val_long, x_val_short = model.preprocess_input(self.x_val)
        x_test_long, x_test_short = model.preprocess_input(self.x_test)

        model.fit([x_train_long, x_train_short],
                  self.y_train,
                  validation_data=([x_val_long, x_val_short], self.y_val),
                  batch_size=32,
                  distributed=False)
        assert model.evaluate([x_val_long, x_val_short], self.y_val)
        predict_result = model.predict([x_test_long, x_test_short])
        assert predict_result.shape == (self.x_test.shape[0], target_dim)
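
# MTNetForecaster.preprocess_input (used in the test above) reshapes each
# rolled window of length (long_num + 1) * time_step into long-term and
# short-term inputs. The helper below is a minimal numpy sketch of that
# split, written as an assumption about the layout; it is not the
# library's implementation.
def split_mtnet_input_sketch(x, long_num, time_step):
    """Split (batch, (long_num + 1) * time_step, features) windows into
    long-term chunks and the most recent short-term window."""
    batch, seq_len, features = x.shape
    assert seq_len == (long_num + 1) * time_step
    # First long_num chunks of time_step steps each form the long-term
    # memory series.
    x_long = x[:, :long_num * time_step, :].reshape(
        batch, long_num, time_step, features)
    # The trailing time_step steps form the short-term input.
    x_short = x[:, long_num * time_step:, :]
    return x_long, x_short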
class TestTimeSequencePipeline(ZooTestCase):
    # Fragment of the original pipeline test module: the class name is
    # assumed, and get_input_tsp / default_past_seq_len are defined
    # elsewhere in that module.

    def test_evaluate_predict_future_more_1(self):
        target_col = "values"
        metrics = ["mse", "r2"]
        future_seq_len = np.random.randint(2, 6)
        train_df, test_df, tsp, test_sample_num = self.get_input_tsp(
            future_seq_len, target_col)
        pipeline = tsp.fit(train_df, test_df)
        mse, rs = pipeline.evaluate(test_df, metrics=metrics)
        assert len(mse) == future_seq_len
        assert len(rs) == future_seq_len
        y_pred = pipeline.predict(test_df)
        assert y_pred.shape == (test_sample_num - default_past_seq_len + 1,
                                future_seq_len + 1)

        y_pred_df = pipeline.predict(test_df[:-future_seq_len])
        columns = ["{}_{}".format(target_col, i)
                   for i in range(future_seq_len)]
        y_pred_value = y_pred_df[columns].values

        y_df = test_df[default_past_seq_len:]
        y_value = TimeSequenceFeatureTransformer()._roll_test(
            y_df[target_col], future_seq_len)

        mse_pred_eval, rs_pred_eval = [Evaluator.evaluate(m, y_value,
                                                          y_pred_value)
                                       for m in metrics]
        mse_eval, rs_eval = pipeline.evaluate(test_df, metrics)
        assert_array_almost_equal(mse_pred_eval, mse_eval, decimal=2)
        assert_array_almost_equal(rs_pred_eval, rs_eval, decimal=2)
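
# Every test in this module builds samples with the feature transformer's
# roll methods. The sketch below shows the sliding-window logic the shape
# assertions rely on, assuming the target is the first column; it is an
# illustration, not the real _roll_train implementation.
def roll_train_sketch(data, past_seq_len, future_seq_len):
    """Return x of shape (n, past_seq_len, features) and y of shape
    (n, future_seq_len), where n = len(data) - past_seq_len
    - future_seq_len + 1."""
    data = np.asarray(data)
    n = data.shape[0] - past_seq_len - future_seq_len + 1
    x = np.stack([data[i:i + past_seq_len] for i in range(n)])
    y = np.stack([data[i + past_seq_len:i + past_seq_len + future_seq_len, 0]
                  for i in range(n)])
    return x, y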
class TestMTNetKeras(ZooTestCase):

    def setup_method(self, method):
        tf.keras.backend.clear_session()
        self.ft = TimeSequenceFeatureTransformer()
        self.create_data()
        self.model = MTNetKeras()
        self.config = {"long_num": self.long_num,
                       "time_step": self.time_step,
                       "ar_window": np.random.randint(1, 3),
                       "cnn_height": np.random.randint(1, 3),
                       "epochs": 1}

    def teardown_method(self, method):
        pass

    def create_data(self):
        def gen_train_sample(data, past_seq_len, future_seq_len):
            data = pd.DataFrame(data)
            x, y = self.ft._roll_train(data,
                                       past_seq_len=past_seq_len,
                                       future_seq_len=future_seq_len)
            return x, y

        def gen_test_sample(data, past_seq_len):
            test_data = pd.DataFrame(data)
            x = self.ft._roll_test(test_data, past_seq_len=past_seq_len)
            return x

        self.long_num = 6
        self.time_step = 2
        look_back = (self.long_num + 1) * self.time_step
        look_forward = 1
        self.x_train, self.y_train = gen_train_sample(
            data=np.random.randn(64, 4),
            past_seq_len=look_back,
            future_seq_len=look_forward)
        self.x_val, self.y_val = gen_train_sample(
            data=np.random.randn(16, 4),
            past_seq_len=look_back,
            future_seq_len=look_forward)
        self.x_test = gen_test_sample(data=np.random.randn(16, 4),
                                      past_seq_len=look_back)

    def test_fit_evaluate(self):
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            **self.config)
        self.model.evaluate(self.x_val, self.y_val)

    def test_save_restore(self):
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            **self.config)
        y_pred = self.model.predict(self.x_test)
        assert y_pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
        dirname = "/tmp"
        restored_model = MTNetKeras()
        ckpt = os.path.join(dirname, "mtnet.ckpt")
        self.model.save(checkpoint_file=ckpt)
        restored_model.restore(checkpoint_file=ckpt)
        predict_after = restored_model.predict(self.x_test)
        # Pass the message via err_msg; a trailing ", \ ..." after the call
        # would build a tuple and never reach the assertion.
        assert_array_almost_equal(
            y_pred, predict_after, decimal=2,
            err_msg="Prediction values are not the same after restore: "
                    "predict before is {}, and predict after is {}"
                    .format(y_pred, predict_after))
        restored_model.fit_eval((self.x_train, self.y_train), epochs=1)
        restored_model.evaluate(self.x_val, self.y_val)
        os.remove(ckpt)

    def test_predict_with_uncertainty(self):
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            mc=True,
                            **self.config)
        pred, uncertainty = self.model.predict_with_uncertainty(self.x_test,
                                                                n_iter=2)
        assert pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
        assert uncertainty.shape == pred.shape
        assert np.any(uncertainty)
class TestSeq2Seq(ZooTestCase):

    def setup_method(self, method):
        # super().setup_method(method)
        self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
        self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
        self.test_data = pd.DataFrame(data=np.random.randn(16, 4))

        self.past_seq_len = 6
        self.future_seq_len_1 = 1
        self.future_seq_len_2 = 2

        # use roll method in time_sequence
        self.feat = TimeSequenceFeatureTransformer()
        self.config = {'batch_size': 32, 'epochs': 1}

        self.model_1 = LSTMSeq2Seq(check_optional_config=False,
                                   future_seq_len=self.future_seq_len_1)
        self.model_2 = LSTMSeq2Seq(check_optional_config=False,
                                   future_seq_len=self.future_seq_len_2)
        self.fitted = False
        self.predict_1 = None
        self.predict_2 = None

    def teardown_method(self, method):
        pass

    def test_fit_eval_1(self):
        x_train_1, y_train_1 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_1)
        print("fit_eval_future_seq_len_1:",
              self.model_1.fit_eval((x_train_1, y_train_1), **self.config))
        assert self.model_1.past_seq_len == 6
        assert self.model_1.feature_num == 4
        assert self.model_1.future_seq_len == 1
        assert self.model_1.target_col_num == 1

    def test_fit_eval(self):
        past_seq_len = 6
        future_seq_len = 2
        input_dim = 5
        output_dim = 4
        x_train = np.random.rand(100, past_seq_len, input_dim)
        y_train = np.random.rand(100, future_seq_len, output_dim)
        x_test = np.random.rand(100, past_seq_len, input_dim)
        y_test = np.random.rand(100, future_seq_len, output_dim)
        model = LSTMSeq2Seq(check_optional_config=False,
                            future_seq_len=future_seq_len)
        model_config = {
            'batch_size': 32,
            'epochs': 1,
            'latent_dim': 128,
            'dropout': 0.2
        }
        model.fit_eval((x_train, y_train), **model_config)
        y_pred = model.predict(x_test)
        rmse, smape = model.evaluate(x=x_test, y=y_test,
                                     metric=["rmse", "smape"])
        assert rmse.shape == smape.shape
        assert rmse.shape == (future_seq_len, output_dim)
        assert model.past_seq_len == past_seq_len
        assert model.future_seq_len == future_seq_len
        assert model.feature_num == input_dim
        assert model.target_col_num == output_dim
        assert y_pred.shape == y_test.shape

    def test_fit_eval_2(self):
        x_train_2, y_train_2 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_2)
        print("fit_eval_future_seq_len_2:",
              self.model_2.fit_eval((x_train_2, y_train_2), **self.config))
        assert self.model_2.future_seq_len == 2
        self.fitted = True

    def test_evaluate_1(self):
        x_train_1, y_train_1 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_1)
        x_val_1, y_val_1 = self.feat._roll_train(
            self.val_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_1)
        self.model_1.fit_eval((x_train_1, y_train_1), **self.config)
        print("evaluate_future_seq_len_1:",
              self.model_1.evaluate(x_val_1, y_val_1, metric=['mse', 'r2']))

    def test_evaluate_2(self):
        x_train_2, y_train_2 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_2)
        x_val_2, y_val_2 = self.feat._roll_train(
            self.val_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_2)
        self.model_2.fit_eval((x_train_2, y_train_2), **self.config)
        print("evaluate_future_seq_len_2:",
              self.model_2.evaluate(x_val_2, y_val_2, metric=['mse', 'r2']))

    def test_predict_1(self):
        x_train_1, y_train_1 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_1)
        x_test_1 = self.feat._roll_test(self.test_data,
                                        past_seq_len=self.past_seq_len)
        self.model_1.fit_eval((x_train_1, y_train_1), **self.config)
        predict_1 = self.model_1.predict(x_test_1)
        assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)

    def test_predict_2(self):
        x_train_2, y_train_2 = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=self.future_seq_len_2)
        x_test_2 = self.feat._roll_test(self.test_data,
                                        past_seq_len=self.past_seq_len)
        self.model_2.fit_eval((x_train_2, y_train_2), **self.config)
        predict_2 = self.model_2.predict(x_test_2)
        assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)

    def test_save_restore_single_step(self):
        future_seq_len = 1
        x_train, y_train = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=future_seq_len)
        x_test = self.feat._roll_test(self.test_data,
                                      past_seq_len=self.past_seq_len)
        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
        model.fit_eval((x_train, y_train), **self.config)
        predict_before = model.predict(x_test)

        new_model = LSTMSeq2Seq()
        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
        model.save(ckpt)
        new_model.restore(ckpt)
        predict_after = new_model.predict(x_test)
        # Pass the message via err_msg; a trailing ", \ ..." after the call
        # would build a tuple and never reach the assertion.
        assert_array_almost_equal(
            predict_before, predict_after, decimal=2,
            err_msg="Prediction values are not the same after restore: "
                    "predict before is {}, and predict after is {}"
                    .format(predict_before, predict_after))
        new_config = {'epochs': 1}
        new_model.fit_eval((x_train, y_train), **new_config)
        os.remove(ckpt)

    def test_save_restore_multistep(self):
        future_seq_len = np.random.randint(2, 6)
        x_train, y_train = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=future_seq_len)
        x_test = self.feat._roll_test(self.test_data,
                                      past_seq_len=self.past_seq_len)
        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
        model.fit_eval((x_train, y_train), **self.config)
        predict_before = model.predict(x_test)

        new_model = LSTMSeq2Seq()
        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
        model.save(ckpt)
        new_model.restore(ckpt)
        predict_after = new_model.predict(x_test)
        assert_array_almost_equal(
            predict_before, predict_after, decimal=2,
            err_msg="Prediction values are not the same after restore: "
                    "predict before is {}, and predict after is {}"
                    .format(predict_before, predict_after))
        new_config = {'epochs': 1}
        new_model.fit_eval((x_train, y_train), **new_config)
        os.remove(ckpt)

    def test_predict_with_uncertainty(self):
        future_seq_len = np.random.randint(2, 6)
        x_train, y_train = self.feat._roll_train(
            self.train_data,
            past_seq_len=self.past_seq_len,
            future_seq_len=future_seq_len)
        x_test = self.feat._roll_test(self.test_data,
                                      past_seq_len=self.past_seq_len)
        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
        model.fit_eval((x_train, y_train), mc=True, **self.config)
        prediction, uncertainty = model.predict_with_uncertainty(x_test,
                                                                 n_iter=2)
        assert prediction.shape == (x_test.shape[0], future_seq_len)
        assert uncertainty.shape == (x_test.shape[0], future_seq_len)
        assert np.any(uncertainty)

        new_model = LSTMSeq2Seq()
        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
        model.save(ckpt)
        new_model.restore(ckpt)
        prediction_after, uncertainty_after = \
            new_model.predict_with_uncertainty(x_test, n_iter=2)
        assert prediction_after.shape == (x_test.shape[0], future_seq_len)
        assert uncertainty_after.shape == (x_test.shape[0], future_seq_len)
        assert np.any(uncertainty_after)
        os.remove(ckpt)
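
# predict_with_uncertainty (exercised above with mc=True) follows the
# Monte Carlo dropout recipe: keep dropout active at inference, run
# several stochastic forward passes, and report their mean and spread.
# A minimal sketch of that recipe; `stochastic_predict` is a stand-in
# for a model's dropout-enabled forward pass, not a zoo API.
def mc_dropout_predict_sketch(stochastic_predict, x, n_iter=10):
    """Return (mean prediction, per-output std) over n_iter passes."""
    samples = np.stack([stochastic_predict(x) for _ in range(n_iter)])
    return samples.mean(axis=0), samples.std(axis=0)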