Example #1
 def test_temporal_loader(self):
     # self.kwargs is assumed to be defined in the test class's setUp;
     # Example #2 below shows the full dictionary inline.
     loa = TemporalLoader(["month", "day", "day_of_week", "hour"],
                          self.kwargs)
     result = loa[0]
     self.assertEqual(len(result), 2)
     # Test that the output has the proper dimensions
     self.assertEqual(result[0][0].shape[0], 5)  # forecast_history rows
     self.assertEqual(result[0][0].shape[1], 3)  # relevant columns
     self.assertEqual(result[0][1].shape[0], 5)  # forecast_history rows
     self.assertEqual(result[0][1].shape[1], 4)  # temporal features
     # Test that the output is in the right order
     temporal_src_embd = result[0][1]
     row = temporal_src_embd[2, :]  # third row of the source temporal features
     self.assertEqual(row[0], 5)
     self.assertEqual(row[1], 1)
     self.assertEqual(row[3], 3)
     # Test the data embedding component
     d = DataEmbedding(3, 128)  # 3 input series, d_model of 128
     embedding = d(result[0][0].unsqueeze(0),
                   temporal_src_embd.unsqueeze(0))
     self.assertEqual(embedding.shape[2], 128)
     i = Informer(3, 3, 3, 5, 5, out_len=4, factor=1)
     src_series = result[0][0].unsqueeze(0)     # encoder input series
     src_temporal = result[0][1].unsqueeze(0)   # encoder temporal features
     trg_series = result[1][1].unsqueeze(0)     # decoder input series
     trg_temporal = result[1][0].unsqueeze(0)   # decoder temporal features
     res = i(src_series, src_temporal, trg_series, trg_temporal)
     self.assertEqual(res.shape[1], 1)
Example #2
 def test_temporal_loader(self):
     kwargs = {
         "file_path": "tests/test_data/keag_small.csv",
         "forecast_history": 5,
         "forecast_length": 1,
         "target_col": ["cfs"],
         "relevant_cols": ["cfs", "temp", "precip"],
         "sort_column": "date",
         "feature_params": {
             "datetime_params": {
                 "month": "numerical",
                 "day": "numerical",
                 "day_of_week": "numerical",
                 "hour": "numerical"
             }
         }
     }
     loa = TemporalLoader(["month", "day", "day_of_week", "hour"], kwargs)
     result = loa[0]
     self.assertEqual(len(result), 2)
     # Test that the output has the proper dimensions
     self.assertEqual(result[0][0].shape[0], 5)  # forecast_history rows
     self.assertEqual(result[0][0].shape[1], 3)  # relevant columns
     self.assertEqual(result[0][1].shape[0], 5)  # forecast_history rows
     self.assertEqual(result[0][1].shape[1], 4)  # temporal features
     # Test that the output is in the right order
     temporal_src_embd = result[0][1]
     row = temporal_src_embd[2, :]  # third row of the source temporal features
     self.assertEqual(row[0], 5)
     self.assertEqual(row[1], 1)
     self.assertEqual(row[3], 3)
     # Test the data embedding component
     d = DataEmbedding(3, 128)  # 3 input series, d_model of 128
     embedding = d(result[0][0].unsqueeze(0),
                   temporal_src_embd.unsqueeze(0))
     self.assertEqual(embedding.shape[2], 128)
     i = Informer(3, 3, 3, 5, 5, out_len=4, factor=1)
     src_series = result[0][0].unsqueeze(0)     # encoder input series
     src_temporal = result[0][1].unsqueeze(0)   # encoder temporal features
     trg_series = result[1][1].unsqueeze(0)     # decoder input series
     trg_temporal = result[1][0].unsqueeze(0)   # decoder temporal features
     res = i(src_series, src_temporal, trg_series, trg_temporal)
     self.assertEqual(res.shape[1], 1)
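
The two tests above assume a unittest class with the project imports already in scope. Below is a rough standalone sketch of the same loader check; the import path flood_forecast.preprocessing.pytorch_loaders is an assumption about this codebase's layout, not something the examples confirm.

    from flood_forecast.preprocessing.pytorch_loaders import TemporalLoader

    # Same configuration as the kwargs dictionary in Example #2.
    kwargs = {
        "file_path": "tests/test_data/keag_small.csv",
        "forecast_history": 5,
        "forecast_length": 1,
        "target_col": ["cfs"],
        "relevant_cols": ["cfs", "temp", "precip"],
        "sort_column": "date",
        "feature_params": {
            "datetime_params": {
                "month": "numerical",
                "day": "numerical",
                "day_of_week": "numerical",
                "hour": "numerical",
            }
        },
    }
    loader = TemporalLoader(["month", "day", "day_of_week", "hour"], kwargs)
    src, trg = loader[0]
    print(src[0].shape)  # expected: torch.Size([5, 3]) -> history x relevant cols
    print(src[1].shape)  # expected: torch.Size([5, 4]) -> history x temporal feats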
Example #3
 def test_data_embedding(self):
     # data=5 configures five temporal feature channels in the embedding
     d = DataEmbedding(5, 128, data=5)
     r = d(torch.rand(2, 10, 5), torch.rand(2, 10, 5))
     self.assertTrue(hasattr(d.temporal_embedding, "month_embed"))
     self.assertEqual(r.shape[2], 128)
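
The month_embed assertion passes because the temporal part of DataEmbedding keeps one sub-embedding per calendar field and sums their outputs. The sketch below illustrates that idea; it is a simplification modeled on the upstream Informer2020 embedding code, and every name here other than month_embed is illustrative rather than this library's exact implementation.

    import torch
    import torch.nn as nn

    class TemporalEmbeddingSketch(nn.Module):
        """Illustrative only: one embedding per calendar field, summed."""
        def __init__(self, d_model: int):
            super().__init__()
            self.month_embed = nn.Embedding(13, d_model)  # months 1-12
            self.day_embed = nn.Embedding(32, d_model)    # days 1-31
            self.weekday_embed = nn.Embedding(7, d_model)  # 0-6
            self.hour_embed = nn.Embedding(24, d_model)    # 0-23

        def forward(self, x_mark: torch.Tensor) -> torch.Tensor:
            x = x_mark.long()  # calendar fields arrive as numbers
            return (self.month_embed(x[..., 0]) + self.day_embed(x[..., 1]) +
                    self.weekday_embed(x[..., 2]) + self.hour_embed(x[..., 3]))

    emb = TemporalEmbeddingSketch(128)
    print(emb(torch.zeros(2, 10, 4)).shape)  # torch.Size([2, 10, 128])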
Example #4
    def __init__(self,
                 n_time_series: int,
                 dec_in: int,
                 c_out: int,
                 seq_len,
                 label_len,
                 out_len,
                 factor=5,
                 d_model=512,
                 n_heads=8,
                 e_layers=3,
                 d_layers=2,
                 d_ff=512,
                 dropout=0.0,
                 attn='prob',
                 embed='fixed',
                 temp_depth=4,
                 activation='gelu',
                 device=torch.device('cuda:0')):
        """ This is based on the implementation of the Informer available from the original authors
            https://github.com/zhouhaoyi/Informer2020. We have done some minimal refactoring, but
            the core code remains the same.

        :param n_time_series: The number of time series present in the multivariate forecasting problem.
        :type n_time_series: int
        :param dec_in: The input size to the decoder (e.g. the number of time series passed to the decoder)
        :type dec_in: int
        :param c_out: The output dimension of the model (usually will be the number of variables you are forecasting).
        :type c_out:  int
        :param seq_len: The number of historical time steps to pass into the model.
        :type seq_len: int
        :param label_len: The length of the label sequence passed into the decoder.
        :type label_len: int
        :param out_len: The overall output length from the decoder.
        :type out_len: int
        :param factor: The multiplicative factor in the probabilistic attention mechanism, defaults to 5
        :type factor: int, optional
        :param d_model: The embedding dimension of the model, defaults to 512
        :type d_model: int, optional
        :param n_heads: The number of heads in the multi-head attention mechanism, defaults to 8
        :type n_heads: int, optional
        :param e_layers: The number of layers in the encoder, defaults to 3
        :type e_layers: int, optional
        :param d_layers: The number of layers in the decoder, defaults to 2
        :type d_layers: int, optional
        :param d_ff: The dimension of the feed-forward layer, defaults to 512
        :type d_ff: int, optional
        :param dropout: The dropout rate, defaults to 0.0
        :type dropout: float, optional
        :param attn: The type of attention mechanism, either 'prob' or 'full', defaults to 'prob'
        :type attn: str, optional
        :param embed: Whether to use :class:`FixedEmbedding` or :class:`torch.nn.Embedding`, defaults to 'fixed'
        :type embed: str, optional
        :param temp_depth: The number of temporal features passed to the embedding (e.g. month, day, day_of_week, hour), defaults to 4
        :type temp_depth: int, optional
        :param activation: The activation function, defaults to 'gelu'
        :type activation: str, optional
        :param device: The device the model uses, defaults to torch.device('cuda:0')
        :type device: torch.device, optional
        """
        super(Informer, self).__init__()
        self.pred_len = out_len
        self.label_len = label_len
        self.attn = attn
        self.c_out = c_out
        # Encoding
        self.enc_embedding = DataEmbedding(n_time_series, d_model, embed,
                                           temp_depth, dropout)
        self.dec_embedding = DataEmbedding(dec_in, d_model, embed, temp_depth,
                                           dropout)
        # Attention
        Attn = ProbAttention if attn == 'prob' else FullAttention
        # Encoder
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        Attn(False, factor, attention_dropout=dropout),
                        d_model, n_heads),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation) for _ in range(e_layers)
            ],
            [ConvLayer(d_model) for _ in range(e_layers - 1)],
            norm_layer=torch.nn.LayerNorm(d_model))
        # Decoder
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(
                        FullAttention(True, factor, attention_dropout=dropout),
                        d_model, n_heads),
                    AttentionLayer(
                        FullAttention(False, factor, attention_dropout=dropout),
                        d_model, n_heads),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation,
                ) for _ in range(d_layers)
            ],
            norm_layer=torch.nn.LayerNorm(d_model))
        self.projection = nn.Linear(d_model, c_out, bias=True)
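
For orientation, here is a minimal usage sketch for this constructor (not part of the original source). It assumes the forward signature forward(x_enc, x_mark_enc, x_dec, x_mark_dec) from the upstream Informer2020 code, with the decoder input covering label_len + out_len steps and the model returning the last out_len of them.

    import torch

    # Parameter values mirror the tests above; device is switched to CPU.
    model = Informer(n_time_series=3, dec_in=3, c_out=3, seq_len=5,
                     label_len=5, out_len=4, factor=1,
                     device=torch.device("cpu"))
    x_enc = torch.rand(1, 5, 3)       # (batch, seq_len, n_time_series)
    x_mark_enc = torch.rand(1, 5, 4)  # (batch, seq_len, temp_depth)
    x_dec = torch.rand(1, 9, 3)       # (batch, label_len + out_len, dec_in)
    x_mark_dec = torch.rand(1, 9, 4)
    out = model(x_enc, x_mark_enc, x_dec, x_mark_dec)
    print(out.shape)  # expected: torch.Size([1, 4, 3]) under these assumptions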