Example 1
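All examples below are unit-test methods lifted from their test classes; they assume imports along these lines (a sketch: q is taken to be the qelos library the q.* calls come from, and the Variable usage dates the code to pre-0.4 PyTorch):

    import numpy as np
    import torch
    import torch.nn as nn
    from torch.autograd import Variable
    import qelos as q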
    def test_bypass_stack(self):
        data = q.var(np.random.random((3, 5)).astype(dtype="float32")).v
        # argsave.spec(a=0) stores the first Forward's output under "a";
        # argmap.spec(0, ["a"]) later re-injects it as a second argument,
        # which the Lambda concatenates back in: a bypass (skip) connection
        stack = q.Stack(q.Forward(5, 5), q.argsave.spec(a=0), q.Forward(5, 5),
                        q.Forward(5, 5), q.argmap.spec(0, ["a"]),
                        q.Lambda(lambda x, y: torch.cat([x, y], 1)),
                        q.Forward(10, 7))
        out = stack(data)
        print(out)
        self.assertEqual(out.size(), (3, 7))
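Unrolled by hand, the stack above computes the following; a minimal sketch in plain PyTorch, assuming q.Forward wraps an nn.Linear (the layer.lin access in Example 4 suggests so) and ignoring any activation it may add:

    import torch
    import torch.nn as nn

    f1, f2, f3 = nn.Linear(5, 5), nn.Linear(5, 5), nn.Linear(5, 5)
    f4 = nn.Linear(10, 7)
    x = torch.randn(3, 5)
    h1 = f1(x)                        # saved as "a" by argsave.spec(a=0)
    h3 = f3(f2(h1))                   # the two middle Forward layers
    out = f4(torch.cat([h3, h1], 1))  # argmap + Lambda: concat, then project
    assert out.size() == (3, 7)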
Example 2
    def test_shapes(self):
        batsize, seqlen, inpdim = 5, 7, 8
        vocsize, embdim, encdim = 20, 9, 10
        ctxtoinitff = q.Forward(inpdim, encdim)
        coreff = q.Forward(encdim, encdim)
        # init state: feed the last context timestep through both Forward layers
        initstategen = q.Lambda(lambda *x, **kw: coreff(ctxtoinitff(x[1][:, -1, :])),
                                register_modules=coreff)

        decoder_cell = q.AttentionDecoderCell(
            attention=q.Attention().forward_gen(inpdim, encdim + embdim, encdim),
            embedder=nn.Embedding(vocsize, embdim),
            core=q.RecStack(
                q.GRUCell(embdim + inpdim, encdim),
                q.GRUCell(encdim, encdim),
                coreff
            ),
            smo=q.Stack(
                q.Forward(encdim + inpdim, encdim),
                q.Forward(encdim, vocsize),
                q.Softmax()
            ),
            init_state_gen=initstategen,
            ctx_to_decinp=True,
            ctx_to_smo=True,
            state_to_smo=True,
            decinp_to_att=True
        )
        decoder = decoder_cell.to_decoder()

        ctx = np.random.random((batsize, seqlen, inpdim))
        ctx = Variable(torch.FloatTensor(ctx))
        ctxmask = np.ones((batsize, seqlen))
        ctxmask[:, -2:] = 0       # mask the last two context steps for every example
        ctxmask[[0, 1], -3:] = 0  # the first two examples lose a third step
        ctxmask = Variable(torch.FloatTensor(ctxmask))
        inp = np.random.randint(0, vocsize, (batsize, seqlen))
        inp = Variable(torch.LongTensor(inp))

        decoded = decoder(inp, ctx, ctxmask)

        self.assertEqual((batsize, seqlen, vocsize), decoded.size())
        # each timestep's output should sum to one, i.e. be a proper distribution
        self.assertTrue(np.allclose(
            np.sum(decoded.data.numpy(), axis=-1),
            np.ones_like(np.sum(decoded.data.numpy(), axis=-1))))
        print(decoded.size())
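For reference, the ctxmask built above zeroes out trailing context steps; with batsize=5 and seqlen=7 it can be checked by hand in plain numpy:

    import numpy as np

    ctxmask = np.ones((5, 7))
    ctxmask[:, -2:] = 0       # columns 5-6 zeroed for all rows
    ctxmask[[0, 1], -3:] = 0  # rows 0-1 also lose column 4
    # rows 0-1: [1 1 1 1 0 0 0]; rows 2-4: [1 1 1 1 1 0 0]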
Example 3
    def test_shapes(self):
        batsize, seqlen, vocsize, embdim, encdim = 5, 3, 20, 4, 6
        # embed -> GRU -> project to vocabulary -> log-probabilities
        m = q.RecurrentStack(nn.Embedding(vocsize, embdim),
                             q.GRULayer(embdim, encdim),
                             q.Forward(encdim, vocsize), q.LogSoftmax())
        data = Variable(
            torch.LongTensor(np.random.randint(0, vocsize, (batsize, seqlen))))
        pred = m(data)
        print(pred)
        self.assertEqual((batsize, seqlen, vocsize), pred.size())
        # the GRU layer's final state stays retrievable after the forward pass
        self.assertEqual((batsize, encdim), m.get_states(0)[0].size())
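A rough equivalent of the stack above in plain (modern) PyTorch; a sketch with my own names, where nn.GRU's explicit final state stands in for qelos's get_states bookkeeping:

    import torch
    import torch.nn as nn

    emb = nn.Embedding(20, 4)
    gru = nn.GRU(4, 6, batch_first=True)
    proj = nn.Linear(6, 20)

    x = torch.randint(0, 20, (5, 3))
    h_seq, h_last = gru(emb(x))                # h_seq: (5, 3, 6), h_last: (1, 5, 6)
    pred = torch.log_softmax(proj(h_seq), -1)  # (5, 3, 20), like q.LogSoftmax()
    assert h_last[0].size() == (5, 6)          # plays the role of get_states(0)[0]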
Example 4
    def test_dynamic_bypass_stack(self):
        data = q.var(np.random.random((3, 5)).astype(dtype="float32")).v
        stack = q.Stack()
        nlayers = 5
        for _ in range(nlayers):
            # each block saves its input under "a" and adds it back at the end:
            # a residual connection built from argsave/argmap and a Lambda
            stack.add(q.argsave.spec(a=0), q.Forward(5, 5), q.Forward(5, 5),
                      q.argmap.spec(0, ["a"]), q.Lambda(lambda x, y: x + y))
        out = stack(data)
        print(out)
        self.assertEqual(out.size(), (3, 5))

        out.sum().backward()

        # every Forward layer must have received nonzero gradients
        forwards = []
        for layer in stack.layers:
            if isinstance(layer, q.Forward):
                self.assertTrue(layer.lin.weight.grad is not None)
                self.assertTrue(layer.lin.bias.grad is not None)
                print(layer.lin.weight.grad.norm(2))
                # .data[0] reads out the scalar norm (pre-0.4 PyTorch idiom)
                self.assertTrue(layer.lin.weight.grad.norm(2).data[0] > 0)
                self.assertTrue(layer.lin.bias.grad.norm(2).data[0] > 0)
                forwards.append(layer)

        self.assertEqual(len(forwards), nlayers * 2)
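Each loop iteration above is effectively a residual block; the same block in plain PyTorch (a sketch, again treating q.Forward as a bare linear layer):

    import torch.nn as nn

    class ResidualBlock(nn.Module):
        def __init__(self, dim=5):
            super(ResidualBlock, self).__init__()
            self.f1 = nn.Linear(dim, dim)
            self.f2 = nn.Linear(dim, dim)

        def forward(self, x):                # x is what argsave.spec(a=0) stores
            return self.f2(self.f1(x)) + x   # the Lambda's x + y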
Example 5
    def test_simple_decoder_shape(self):
        batsize, seqlen, vocsize = 5, 4, 7
        embdim, encdim, outdim = 10, 16, 10
        # model def
        decoder_cell = q.DecoderCell(
            nn.Embedding(vocsize, embdim, padding_idx=0),
            q.GRUCell(embdim, encdim),
            q.Forward(encdim, vocsize),
            q.Softmax()
        )
        decoder = decoder_cell.to_decoder()
        # end model def
        data = np.random.randint(0, vocsize, (batsize, seqlen))
        data = Variable(torch.LongTensor(data))

        decoded = decoder(data).data.numpy()
        self.assertEqual(decoded.shape, (batsize, seqlen, vocsize))     # shape check
        self.assertTrue(np.allclose(np.sum(decoded, axis=-1), np.ones_like(np.sum(decoded, axis=-1))))  # prob check
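Conceptually, to_decoder() unrolls the cell over the gold input sequence (teacher forcing), one distribution per step; a minimal sketch of such a loop with a hypothetical helper, not the actual qelos implementation:

    import torch

    def unroll(cell, inp):                 # inp: (batsize, seqlen) token ids
        steps = []
        for t in range(inp.size(1)):       # feed the gold token at each step;
            steps.append(cell(inp[:, t]))  # the cell keeps its recurrent state
        return torch.stack(steps, 1)       # (batsize, seqlen, vocsize)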
Example 6
    def test_fast_context_decoder_shape(self):
        batsize, seqlen, vocsize = 5, 4, 7
        embdim, encdim, outdim, ctxdim = 10, 16, 10, 8
        # model def
        decoder = q.ContextDecoder(
            nn.Embedding(vocsize, embdim, padding_idx=0),
            q.RecurrentStack(
                q.GRULayer(embdim + ctxdim, encdim),  # input: embedding plus context
                q.Forward(encdim, vocsize),
                q.Softmax()
            )
        )
        # end model def
        data = np.random.randint(0, vocsize, (batsize, seqlen))
        data = Variable(torch.LongTensor(data))
        ctx = Variable(torch.FloatTensor(np.random.random((batsize, ctxdim))))

        decoded = decoder(data, ctx).data.numpy()
        self.assertEqual(decoded.shape, (batsize, seqlen, vocsize))  # shape check
        self.assertTrue(np.allclose(np.sum(decoded, axis=-1), np.ones_like(np.sum(decoded, axis=-1))))  # prob check
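The embdim + ctxdim input size suggests ContextDecoder tiles the context vector over time and concatenates it with each step's embedding; a sketch of that wiring (my names, not the qelos internals):

    import torch

    emb_seq = torch.randn(5, 4, 10)              # (batsize, seqlen, embdim) embeddings
    ctx = torch.randn(5, 8)                      # (batsize, ctxdim)
    ctx_rep = ctx.unsqueeze(1).expand(5, 4, 8)   # repeat ctx along the time axis
    step_inp = torch.cat([emb_seq, ctx_rep], 2)  # (5, 4, 18): embdim + ctxdim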