# Example #1
# 0
    def test_transformer_encoder_forward(self):
        """Forward a random batch through the transformer encoder and check
        the output shape, the (absent) hidden state, and the exact values
        against a precomputed reference tensor.

        NOTE(review): a second method with this exact name appears later in
        the class, so this definition is shadowed and never collected by the
        test runner — presumably a stale copy; confirm and remove/rename.
        """
        bsz = 2
        seq_len = 4
        # Seed before any RNG consumer: encoder construction, the uniform_
        # re-initialization, and the random input all draw from the global
        # generator, in this exact order.
        torch.manual_seed(self.seed)

        encoder = TransformerEncoder(
            hidden_size=self.hidden_size, ff_size=self.ff_size,
            num_layers=self.num_layers, num_heads=self.num_heads,
            dropout=self.dropout, emb_dropout=self.dropout)

        # Overwrite default parameter init with a fixed-range uniform so the
        # reference values below do not depend on the init scheme.
        for param in encoder.parameters():
            torch.nn.init.uniform_(param, -0.5, 0.5)

        inputs = torch.rand(size=(bsz, seq_len, self.emb_size))

        # Full-length sequences: no padding, mask is all True.
        lengths = torch.Tensor([seq_len] * bsz).int()
        src_mask = (torch.ones([bsz, seq_len, 1]) == 1)

        out, state = encoder(inputs, lengths, src_mask)

        self.assertEqual(
            out.shape, torch.Size([bsz, seq_len, self.hidden_size]))
        self.assertIsNone(state)

        expected = torch.Tensor([
            [[0.1615, -0.1195, 0.0586, -0.0921, -0.3483, -0.3654, -0.6052,
              -0.3355, 0.3179, 0.2757, -0.2909, -0.0346],
             [0.1272, -0.1241, 0.0223, -0.1463, -0.3462, -0.1579, -0.5591,
              -0.6274, 0.1822, 0.3043, -0.3818, 0.0094],
             [0.0616, -0.1344, 0.0625, 0.0056, -0.2785, -0.4290, -0.5765,
              -0.5176, -0.0598, 0.3389, -0.5522, -0.1692],
             [0.1539, -0.1371, 0.0026, -0.0248, -0.0856, -0.3223, -0.5537,
              -0.3948, -0.2586, 0.2458, -0.2887, -0.0698]],
            [[0.1863, -0.1198, 0.1006, -0.0277, -0.3779, -0.3728, -0.6343,
              -0.3449, 0.2131, 0.2448, -0.3122, -0.1777],
             [0.0254, -0.1219, 0.0436, -0.0289, -0.2932, -0.2377, -0.6003,
              -0.5406, 0.2308, 0.3578, -0.3728, 0.0707],
             [0.1146, -0.1270, 0.1163, -0.0290, -0.3773, -0.3924, -0.5738,
              -0.6528, 0.1428, 0.3623, -0.4796, 0.0471],
             [0.0815, -0.1355, 0.1016, 0.0496, -0.3001, -0.4812, -0.5557,
              -0.6937, 0.1002, 0.2873, -0.4675, -0.1383]]])
        self.assertTensorAlmostEqual(expected, out)
    def test_transformer_encoder_forward(self):
        """Forward a random batch through the transformer encoder and check
        the output shape, the (absent) hidden state, and the exact values
        against a precomputed reference tensor.

        NOTE(review): this method shadows an earlier definition with the
        same name in this class; only this copy runs. Presumably the earlier
        one is a stale duplicate — confirm and remove it.
        """
        bsz = 2
        seq_len = 4
        # Seed before any RNG consumer: encoder construction, the uniform_
        # re-initialization, and the random input all draw from the global
        # generator, in this exact order.
        torch.manual_seed(self.seed)

        encoder = TransformerEncoder(
            hidden_size=self.hidden_size, ff_size=self.ff_size,
            num_layers=self.num_layers, num_heads=self.num_heads,
            dropout=self.dropout, emb_dropout=self.dropout)

        # Overwrite default parameter init with a fixed-range uniform so the
        # reference values below do not depend on the init scheme.
        for param in encoder.parameters():
            torch.nn.init.uniform_(param, -0.5, 0.5)

        inputs = torch.rand(size=(bsz, seq_len, self.emb_size))

        # Full-length sequences: no padding, mask is all True.
        lengths = torch.Tensor([seq_len] * bsz).int()
        src_mask = (torch.ones([bsz, seq_len, 1]) == 1)

        out, state = encoder(inputs, lengths, src_mask)

        self.assertEqual(
            out.shape, torch.Size([bsz, seq_len, self.hidden_size]))
        self.assertIsNone(state)

        expected = torch.Tensor([
            [[1.9728e-01, -1.2042e-01, 8.0998e-02, 1.3411e-03, -3.5960e-01,
              -5.2988e-01, -5.6056e-01, -3.5297e-01, 2.6680e-01, 2.8343e-01,
              -3.7342e-01, -5.9112e-03],
             [8.9687e-02, -1.2491e-01, 7.7809e-02, -1.3500e-03, -2.7002e-01,
              -4.7312e-01, -5.7981e-01, -4.1998e-01, 1.0457e-01, 2.9726e-01,
              -3.9461e-01, 8.1598e-02],
             [3.4988e-02, -1.3020e-01, 6.0043e-02, 2.7782e-02, -3.1483e-01,
              -3.8940e-01, -5.5557e-01, -5.9540e-01, -2.9808e-02, 3.1468e-01,
              -4.5809e-01, 4.3313e-03],
             [1.2234e-01, -1.3285e-01, 6.3068e-02, -2.3343e-02, -2.3519e-01,
              -4.0794e-01, -5.6063e-01, -5.5484e-01, -1.1272e-01, 3.0103e-01,
              -4.0983e-01, 3.3038e-02]],
            [[9.8597e-02, -1.2121e-01, 1.0718e-01, -2.2644e-02, -4.0282e-01,
              -4.2646e-01, -5.9981e-01, -3.7200e-01, 1.9538e-01, 2.7036e-01,
              -3.4072e-01, -1.7966e-03],
             [8.8470e-02, -1.2618e-01, 5.3351e-02, -1.8531e-02, -3.3834e-01,
              -4.9047e-01, -5.7063e-01, -4.9790e-01, 2.2070e-01, 3.3964e-01,
              -4.1604e-01, 2.3519e-02],
             [5.8373e-02, -1.2706e-01, 1.0598e-01, 9.3277e-05, -3.0493e-01,
              -4.4406e-01, -5.4723e-01, -5.2214e-01, 8.0374e-02, 2.6307e-01,
              -4.4571e-01, 8.7052e-02],
             [7.9567e-02, -1.2977e-01, 1.1731e-01, 2.6198e-02, -2.4024e-01,
              -4.2161e-01, -5.7604e-01, -7.3298e-01, 1.6698e-01, 3.1454e-01,
              -4.9189e-01, 2.4027e-02]]])
        self.assertTensorAlmostEqual(expected, out)