def setup_method(self, method):
    """Reset the keras session and build a fresh model, transformer and config."""
    tf.keras.backend.clear_session()
    self.ft = TimeSequenceFeatureTransformer()
    self.create_data()
    self.model = MTNetKeras()
    # Randomized hyperparameters drawn in the same order as before.
    ar_window = np.random.randint(1, 3)
    cnn_height = np.random.randint(1, 3)
    self.config = {
        "long_num": self.long_num,
        "time_step": self.time_step,
        "ar_window": ar_window,
        "cnn_height": cnn_height,
        "epochs": 1,
    }
def _build(self):
    """
    Construct the underlying MTNet model in tf.keras.

    :return: a tf.keras MTNet model
    """
    # TODO change this function call after MTNet fixes
    future_len = self.model_config.get('output_dim')
    self.internal = MTNetKerasModel(
        check_optional_config=self.check_optional_config,
        future_seq_len=future_len)
    self.internal.apply_config(config=self.model_config)
    return self.internal.build()
def test_save_restore(self):
    """Fit the model, save it, restore into a new instance, and check that
    predictions survive the round trip (then that the restored model can
    keep training and evaluating)."""
    self.model.fit_eval(data=(self.x_train, self.y_train),
                        validation_data=(self.x_val, self.y_val),
                        **self.config)
    y_pred = self.model.predict(self.x_test)
    assert y_pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
    dirname = "tmp"
    restored_model = MTNetKeras()
    try:
        save(dirname, model=self.model)
        restore(dirname, model=restored_model, config=self.config)
        predict_after = restored_model.predict(self.x_test)
        # BUG FIX: the original appended the message with a trailing
        # `, \ "..."`, which just built a discarded tuple — the message
        # was never attached to the failure. Pass it via err_msg instead.
        assert_array_almost_equal(
            y_pred, predict_after, decimal=2,
            err_msg="Prediction values are not the same after restore: "
                    "predict before is {}, and predict after is {}"
                    .format(y_pred, predict_after))
        restored_model.fit_eval((self.x_train, self.y_train), epochs=1)
        restored_model.evaluate(self.x_val, self.y_val)
    finally:
        # Use dirname so cleanup always tracks the save location.
        shutil.rmtree(dirname)
class TestMTNetKeras(ZooTestCase):
    """Unit tests for the MTNetKeras model: fit/evaluate, save/restore,
    and prediction with uncertainty."""

    def setup_method(self, method):
        """Reset the keras session and build a fresh model, transformer and config."""
        tf.keras.backend.clear_session()
        self.ft = TimeSequenceFeatureTransformer()
        self.create_data()
        self.model = MTNetKeras()
        self.config = {"long_num": self.long_num,
                       "time_step": self.time_step,
                       "ar_window": np.random.randint(1, 3),
                       "cnn_height": np.random.randint(1, 3),
                       "epochs": 1}

    def teardown_method(self, method):
        pass

    def create_data(self):
        """Generate random train/val/test samples rolled to the window sizes
        implied by long_num and time_step."""
        def gen_train_sample(data, past_seq_len, future_seq_len):
            data = pd.DataFrame(data)
            x, y = self.ft._roll_train(data,
                                       past_seq_len=past_seq_len,
                                       future_seq_len=future_seq_len)
            return x, y

        def gen_test_sample(data, past_seq_len):
            test_data = pd.DataFrame(data)
            x = self.ft._roll_test(test_data, past_seq_len=past_seq_len)
            return x

        self.long_num = 6
        self.time_step = 2
        # MTNet consumes (long_num + 1) memory series of time_step steps each.
        look_back = (self.long_num + 1) * self.time_step
        look_forward = 1
        self.x_train, self.y_train = gen_train_sample(
            data=np.random.randn(64, 4),
            past_seq_len=look_back,
            future_seq_len=look_forward)
        self.x_val, self.y_val = gen_train_sample(data=np.random.randn(16, 4),
                                                  past_seq_len=look_back,
                                                  future_seq_len=look_forward)
        self.x_test = gen_test_sample(data=np.random.randn(16, 4),
                                      past_seq_len=look_back)

    def test_fit_evaluate(self):
        """Model fits on train data and evaluates on validation data."""
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            **self.config)
        self.model.evaluate(self.x_val, self.y_val)

    def test_save_restore(self):
        """Fit, save, restore into a new instance, and check that predictions
        survive the round trip; restored model can keep training."""
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            **self.config)
        y_pred = self.model.predict(self.x_test)
        assert y_pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
        dirname = "tmp"
        restored_model = MTNetKeras()
        try:
            save(dirname, model=self.model)
            restore(dirname, model=restored_model, config=self.config)
            predict_after = restored_model.predict(self.x_test)
            # BUG FIX: the original appended the message with a trailing
            # `, \ "..."`, which just built a discarded tuple — the message
            # was never attached to the failure. Pass it via err_msg instead.
            assert_array_almost_equal(
                y_pred, predict_after, decimal=2,
                err_msg="Prediction values are not the same after restore: "
                        "predict before is {}, and predict after is {}"
                        .format(y_pred, predict_after))
            restored_model.fit_eval((self.x_train, self.y_train), epochs=1)
            restored_model.evaluate(self.x_val, self.y_val)
        finally:
            # Use dirname so cleanup always tracks the save location.
            shutil.rmtree(dirname)

    def test_predict_with_uncertainty(self):
        """With mc=True, predict_with_uncertainty returns a prediction and a
        matching, non-zero uncertainty estimate."""
        self.model.fit_eval(data=(self.x_train, self.y_train),
                            validation_data=(self.x_val, self.y_val),
                            mc=True,
                            **self.config)
        pred, uncertainty = self.model.predict_with_uncertainty(self.x_test,
                                                                n_iter=2)
        assert pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
        assert uncertainty.shape == pred.shape
        # Uncertainty should not be identically zero when MC dropout is on.
        assert np.any(uncertainty)
class MTNetForecaster(TFParkForecaster):
    """
    MTNet Forecast Model
    """

    def __init__(self,
                 target_dim=1,
                 feature_dim=1,
                 long_series_num=1,
                 series_length=1,
                 ar_window_size=1,
                 cnn_height=1,
                 cnn_hid_size=32,
                 rnn_hid_sizes=None,
                 lr=0.001,
                 loss="mae",
                 cnn_dropout=0.2,
                 rnn_dropout=0.2,
                 metric="mean_squared_error",
                 uncertainty: bool = False,
                 ):
        """
        Build a MTNet Forecast Model.

        :param target_dim: the dimension of model output
        :param feature_dim: the dimension of input feature
        :param long_series_num: the number of series for the long-term memory series
        :param series_length: the series size for long-term and short-term memory series
        :param ar_window_size: the auto regression window size in MTNet
        :param cnn_hid_size: the hidden layer unit for cnn in encoder
        :param rnn_hid_sizes: the hidden layers unit for rnn in encoder;
            defaults to [16, 32] when None
        :param cnn_height: cnn filter height in MTNet
        :param metric: the metric for validation and evaluation
        :param uncertainty: whether to enable calculation of uncertainty
        :param lr: learning rate
        :param loss: the target function you want to optimize on
        :param cnn_dropout: the dropout possibility for cnn in encoder
        :param rnn_dropout: the dropout possibility for rnn in encoder
        """
        # BUG FIX: the original used a mutable default argument
        # (rnn_hid_sizes=[16, 32]); resolve the default here instead.
        if rnn_hid_sizes is None:
            rnn_hid_sizes = [16, 32]
        self.check_optional_config = False
        self.mc = uncertainty
        self.model_config = {
            "feature_num": feature_dim,
            "output_dim": target_dim,
            "metrics": [metric],
            "mc": uncertainty,
            "time_step": series_length,
            "long_num": long_series_num,
            "ar_window": ar_window_size,
            "cnn_height": cnn_height,
            # MTNet consumes (long_num + 1) memory series of time_step steps.
            "past_seq_len": (long_series_num + 1) * series_length,
            "cnn_hid_size": cnn_hid_size,
            "rnn_hid_sizes": rnn_hid_sizes,
            "lr": lr,
            "cnn_dropout": cnn_dropout,
            "rnn_dropout": rnn_dropout,
            "loss": loss
        }
        # CONSISTENCY FIX: _build() and preprocess_input() use
        # `self.internal`, but the original only initialized a never-read
        # `self._internal`. Initialize both (the latter kept for
        # backward compatibility with any external readers).
        self.internal = None
        self._internal = None
        super().__init__()

    def _build(self):
        """
        build a MTNet model in tf.keras
        :return: a tf.keras MTNet model
        """
        # TODO change this function call after MTNet fixes
        self.internal = MTNetKerasModel(
            check_optional_config=self.check_optional_config,
            future_seq_len=self.model_config.get('output_dim'))
        self.internal.apply_config(config=self.model_config)
        return self.internal.build()

    def preprocess_input(self, x):
        """
        The original rolled features needs an extra step to process.
        This should be called before train_x, validation_x, and test_x

        :param x: the original samples from rolling
        :return: a tuple (long_term_x, short_term_x)
            which are long term and short term history respectively
        """
        return self.internal._reshape_input_x(x)