import os

import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal

# NOTE: the import paths below are assumptions; adjust them to the project layout.
from zoo.automl.model.tcmf_model import TCMF
from test.zoo.pipeline.utils.test_utils import ZooTestCase


class TestTCMF(ZooTestCase):

    def setup_method(self, method):
        seq_len = 480
        self.num_samples = 300
        self.config = {
            "max_y_iterations": 1,
            "init_FX_epoch": 1,
            "max_FX_epoch": 1,
            "max_TCN_epoch": 1,
            "alt_iters": 2,
        }
        self.model = TCMF()
        self.Ymat = np.random.rand(self.num_samples, seq_len)
        self.horizon = np.random.randint(1, 50)

    def teardown_method(self, method):
        del self.model
        del self.Ymat

    def test_fit_predict_evaluate(self):
        self.model.fit_eval(x=self.Ymat, y=None, **self.config)
        # test predict
        result = self.model.predict(x=None, horizon=self.horizon)
        assert result.shape[1] == self.horizon
        # test evaluate
        target = np.random.rand(self.num_samples, self.horizon)
        evaluate_result = self.model.evaluate(y=target, metrics=['mae', 'smape'])
        assert len(evaluate_result) == 2
        assert len(evaluate_result[0]) == self.horizon
        assert len(evaluate_result[1]) == self.horizon

    def test_predict_evaluate_error(self):
        with pytest.raises(ValueError):
            self.model.predict(x=1)
        with pytest.raises(ValueError):
            self.model.evaluate(x=1, y=np.random.rand(self.num_samples, self.horizon))
        with pytest.raises(ValueError):
            self.model.evaluate(x=None, y=None)
        with pytest.raises(Exception):
            self.model.predict(x=None)
        with pytest.raises(Exception):
            self.model.evaluate(x=None, y=np.random.rand(self.num_samples, self.horizon))

    def test_save_restore(self):
        self.model.fit_eval(x=self.Ymat, y=None, **self.config)
        result_save = self.model.predict(x=None, horizon=self.horizon)
        model_file = "tmp.pkl"
        self.model.save(model_file)
        assert os.path.isfile(model_file)
        new_model = TCMF()
        new_model.restore(model_file)
        assert new_model.model
        result_restore = new_model.predict(x=None, horizon=self.horizon)
        # Pass the failure message via err_msg; a trailing ", message" after the
        # assert call would be dead code, since assert_array_almost_equal raises
        # on its own and the comma expression is never checked.
        assert_array_almost_equal(
            result_save, result_restore, decimal=2,
            err_msg="Prediction values are not the same after restore: "
                    "predict before is {}, and predict after is {}"
                    .format(result_save, result_restore))
        os.remove(model_file)
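# A minimal usage sketch of the TCMF model API exercised by the tests above,
# runnable outside pytest and reusing the module-level imports (whose paths
# are assumed). The tiny iteration counts only keep the run fast; they are
# not tuned for accuracy.
if __name__ == "__main__":
    Ymat = np.random.rand(300, 480)  # 300 time series, 480 time points each
    config = {"max_y_iterations": 1, "init_FX_epoch": 1,
              "max_FX_epoch": 1, "max_TCN_epoch": 1, "alt_iters": 2}

    model = TCMF()
    model.fit_eval(x=Ymat, y=None, **config)      # alternate global/local training
    forecast = model.predict(x=None, horizon=24)  # expected shape: (300, 24)
    print(forecast.shape)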
# NOTE: these import paths are assumptions; adjust them to the project layout.
from zoo.automl.model.abstract import Forecaster
from zoo.automl.model.tcmf_model import TCMF


class TCMFForecaster(Forecaster):
    def __init__(self,
                 vbsize=128,
                 hbsize=256,
                 num_channels_X=[32, 32, 32, 32, 32, 1],
                 num_channels_Y=[16, 16, 16, 16, 16, 1],
                 kernel_size=7,
                 dropout=0.1,
                 rank=64,
                 kernel_size_Y=7,
                 learning_rate=0.0005,
                 val_len=24,
                 normalize=False,
                 start_date="2020-4-1",
                 freq="1H",
                 covariates=None,
                 use_time=True,
                 dti=None,
                 svd=True,
                 period=24,
                 max_y_iterations=300,
                 init_FX_epoch=100,
                 max_FX_epoch=300,
                 max_TCN_epoch=300,
                 alt_iters=10):
        """
        Initialize a TCMF forecaster.

        :param vbsize: int, default is 128.
            Vertical batch size, which is the number of cells per batch.
        :param hbsize: int, default is 256.
            Horizontal batch size, which is the number of time series per batch.
        :param num_channels_X: list, default is [32, 32, 32, 32, 32, 1].
            List containing the channel progression of the temporal convolution
            network for the local model.
        :param num_channels_Y: list, default is [16, 16, 16, 16, 16, 1].
            List containing the channel progression of the temporal convolution
            network for the hybrid model.
        :param kernel_size: int, default is 7.
            Kernel size for the local models.
        :param dropout: float, default is 0.1.
            Dropout rate during training.
        :param rank: int, default is 64.
            The rank in the matrix factorization of the global model.
        :param kernel_size_Y: int, default is 7.
            Kernel size of the hybrid model.
        :param learning_rate: float, default is 0.0005.
        :param val_len: int, default is 24.
            Validation length. The last val_len time points are used as
            validation data.
        :param normalize: boolean, default is False.
            Whether to normalize the input data for training.
        :param start_date: str or datetime-like, e.g. "2020-01-01".
            Start date of the time series.
        :param freq: str or DateOffset, default is "1H".
            Frequency of the data.
        :param covariates: 2-D ndarray or None.
            Global covariates for all time series, of shape (r, T), where r is
            the number of covariates and T is the number of time points.
            If None, only the default time covariates are used while use_time
            is True. Otherwise, the time covariates used are the stack of the
            input covariates and the default time covariates.
        :param use_time: boolean, default is True.
            Whether to use time covariates.
        :param dti: DatetimeIndex or None.
            If None, a default fixed-frequency DatetimeIndex generated from
            start_date and freq is used.
        :param svd: boolean, default is True.
            Whether the factor matrices are initialized by NMF.
        :param period: int, default is 24.
            Periodicity of the input time series; leave it at the default if
            not known.
        :param max_y_iterations: int, default is 300.
            Max number of iterations for training the hybrid model.
        :param init_FX_epoch: int, default is 100.
            Number of iterations for initializing the factors.
        :param max_FX_epoch: int, default is 300.
            Max number of iterations for training the factors.
        :param max_TCN_epoch: int, default is 300.
            Max number of iterations for training the local model.
        :param alt_iters: int, default is 10.
            Number of iterations for alternate training.
        """
        self.internal = None
        self.config = {
            "vbsize": vbsize,
            "hbsize": hbsize,
            "num_channels_X": num_channels_X,
            "num_channels_Y": num_channels_Y,
            "kernel_size": kernel_size,
            "dropout": dropout,
            "rank": rank,
            "kernel_size_Y": kernel_size_Y,
            "learning_rate": learning_rate,
            "val_len": val_len,
            "normalize": normalize,
            "start_date": start_date,
            "freq": freq,
            "covariates": covariates,
            "use_time": use_time,
            "dti": dti,
            "svd": svd,
            "period": period,
            "max_y_iterations": max_y_iterations,
            "init_FX_epoch": init_FX_epoch,
            "max_FX_epoch": max_FX_epoch,
            "max_TCN_epoch": max_TCN_epoch,
            "alt_iters": alt_iters,
        }
        self.model = self._build()

    def _build(self):
        self.internal = TCMF()
        return self.internal._build(**self.config)

    def fit(self, x, incremental=False):
        """
        Fit the model on the input time series.

        :param x: the input
        :param incremental: whether the fit is incremental
        :return:
        """
        if incremental:
            self.internal.fit_incremental(x)
        else:
            self.internal.fit_eval(x)

    def evaluate(self,
                 target_value,
                 x=None,
                 metric=['mae'],
                 covariates=None,
                 ):
        """
        Evaluate the model.

        :param target_value: target value for evaluation. Its second dimension
            is interpreted as the horizon length for evaluation.
        :param x: the input
        :param metric: the metrics
        :param covariates: global covariates
        :return: the evaluation results
        """
        return self.internal.evaluate(y=target_value, x=x, metrics=metric)

    def predict(self,
                x=None,
                horizon=24,
                covariates=None,
                ):
        """
        Predict horizon time points ahead.

        :param x: the input. Input x is not supported directly; leave it as None.
        :param horizon: horizon length to look forward.
        :param covariates: the global covariates
        :return: the prediction results
        """
        return self.internal.predict(x=x, horizon=horizon)