Code example #1
0
 def test_s2s_predict_save_restore(self):
     """Fit a Seq2Seq model, predict, and verify a save/restore round-trip.

     The restored model must produce predictions numerically identical
     (within np.allclose tolerance) to the original model's.
     """
     forecaster = Seq2SeqPytorch()
     fit_args = dict(batch_size=128)
     forecaster.fit_eval(self.train_data[0], self.train_data[1],
                         self.val_data, **fit_args)
     prediction = forecaster.predict(self.test_data[0])
     assert prediction.shape == self.test_data[1].shape
     with tempfile.TemporaryDirectory() as checkpoint_dir:
         checkpoint_path = os.path.join(checkpoint_dir, "ckpt")
         forecaster.save(checkpoint_path)
         restored = Seq2SeqPytorch()
         restored.restore(checkpoint_path)
         restored_prediction = restored.predict(self.test_data[0])
         assert np.allclose(prediction, restored_prediction)
Code example #2
0
 def test_s2s_teacher_forcing_fit_evaluate(self):
     """Fit with teacher forcing enabled, then evaluate on validation data.

     Each metric is expected to return one value per (step, feature) pair
     of the validation target, i.e. shape[-2] * shape[-1] entries.
     """
     forecaster = Seq2SeqPytorch()
     fit_args = dict(batch_size=128, teacher_forcing=True)
     forecaster.fit_eval(self.train_data[0], self.train_data[1],
                         self.val_data, **fit_args)
     mse, smape = forecaster.evaluate(self.val_data[0],
                                      self.val_data[1],
                                      metrics=["mse", "smape"])
     target_shape = self.val_data[1].shape
     expected_len = target_shape[-1] * target_shape[-2]
     assert len(mse) == expected_len
     assert len(smape) == expected_len
Code example #3
0
    def __init__(
        self,
        input_feature_num,
        future_seq_len,
        output_feature_num,
        lstm_hidden_dim=128,
        lstm_layer_num=1,
        teacher_forcing=False,
        dropout=0.25,
        lr=0.001,
        loss="mse",
        optimizer="Adam",
    ):
        """
        Build an LSTM sequence-to-sequence forecast model.

        :param input_feature_num: Specify the feature dimension.
        :param future_seq_len: Specify the output time steps (i.e. horizon).
        :param output_feature_num: Specify the output dimension.
        :param lstm_hidden_dim: LSTM hidden channel for decoder and encoder.
        :param lstm_layer_num: LSTM layer number for decoder and encoder.
        :param teacher_forcing: If use teacher forcing in training.
        :param dropout: Specify the dropout close possibility (i.e. the close
               possibility to a neuron). This value defaults to 0.25.
        :param lr: Specify the learning rate. This value defaults to 0.001.
        :param loss: Specify the loss function used for training. This value
               defaults to "mse". You can choose from "mse", "mae" and
               "huber_loss".
        :param optimizer: Specify the optimizer used for training. This value
               defaults to "Adam".
        """
        # Optional-config validation is disabled for this wrapper.
        self.check_optional_config = False
        # Collected hyper-parameters forwarded to the internal model.
        self.model_config = dict(
            input_feature_num=input_feature_num,
            future_seq_len=future_seq_len,
            output_feature_num=output_feature_num,
            lstm_hidden_dim=lstm_hidden_dim,
            lstm_layer_num=lstm_layer_num,
            teacher_forcing=teacher_forcing,
            dropout=dropout,
            lr=lr,
            loss=loss,
            optimizer=optimizer,
        )
        self.internal = Seq2SeqPytorch(check_optional_config=False)