Example #1
    def stepwise_embeddings(self, x):
        if self.lstm.bidirectional:
            # build a batch containing every prefix of the single input
            # sequence (a batch size of 1 is assumed), so the LSTM yields one
            # final hidden state per time step
            sz = x.size()
            batch = torch.zeros(sz[0],
                                sz[0],
                                sz[2],
                                dtype=torch.float,
                                device='cuda' if self.use_cuda else 'cpu')
            # prefix lengths in decreasing order (longest first), as
            # pack_padded_sequence requires
            lengths = np.array(np.arange(sz[0])[::-1] + 1)
            for i, length in enumerate(lengths):
                batch[:length, i, :] = x[:length, 0, :]
            batch = torch.nn.utils.rnn.pack_padded_sequence(batch, lengths)
            _, (h, _) = self.lstm(batch)

            h_stepwise = hidden2fc_input(self.lstm, h)
        else:
            # the unidirectional output already holds one hidden state per
            # time step, so no prefix batch is needed
            h_stepwise, (_, _) = self.lstm(x)

        x = h_stepwise
        for fc in self.fc:
            x = fc(x)

        # the flipping here comes not from the bidirectionality of the net,
        # but because the batch must be ordered longest first for PyTorch
        if self.lstm.bidirectional:
            x = torch.flip(x, dims=[0])

        return self.loss_delegate.mod_forward(x)
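All of these examples rely on a `hidden2fc_input` helper that is not shown. Its implementation is not part of the excerpts; a minimal sketch of what such a helper might do, assuming it reduces the final hidden state to one feature vector per batch element and concatenates the two directions of a bidirectional RNN, could look like this:

import torch

def hidden2fc_input(rnn, h):
    # hypothetical helper (not shown in the examples above): reduce the RNN's
    # final hidden state h to a single feature vector per batch element
    if rnn.bidirectional:
        # h: (num_layers * 2, batch, hidden_size); concatenate the last
        # layer's forward and backward states along the feature dimension
        return torch.cat((h[-2], h[-1]), dim=1)
    # h: (num_layers, batch, hidden_size); take the last layer's state
    return h[-1]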
Example #2
    def embedding(self, x):
        _, (h, _) = self.lstm(x)  # hidden state defaults to zero

        x = hidden2fc_input(self.lstm, h)
        # apply all but the last fully connected layer, so the embedding is
        # the representation feeding the final layer
        for fc in self.fc[:-1]:
            x = fc(x)

        return x
Example #3
    def embedding(self, x):
        _, h = self.gru(x)  # hidden state defaults to zero

        x = hidden2fc_input(self.gru, h)
        for fc in self.fc[:-1]:
            x = fc(x)

        return x
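The methods above are shown without their host module. A hypothetical skeleton of the kind of class they could sit on is sketched below; the class name, constructor arguments, and layer sizes are assumptions, and only the attribute names used by the methods (gru, fc, use_cuda, batch_first, loss_delegate) come from the excerpts.

import torch
import torch.nn as nn

class GRUEmbedder(nn.Module):
    # hypothetical host module for the methods shown in these examples
    def __init__(self, input_size, hidden_size, fc_sizes, use_cuda=False):
        super().__init__()
        self.use_cuda = use_cuda
        self.batch_first = False
        self.gru = nn.GRU(input_size, hidden_size,
                          bidirectional=True, batch_first=self.batch_first)
        # self.fc is iterated directly and sliced with [:-1] in the examples,
        # so a ModuleList of layers fits; a bidirectional GRU feeds
        # 2 * hidden_size features into the first layer
        sizes = [2 * hidden_size] + list(fc_sizes)
        self.fc = nn.ModuleList(
            [nn.Linear(n_in, n_out) for n_in, n_out in zip(sizes[:-1], sizes[1:])])
        # self.loss_delegate (used by stepwise_embeddings) is not shown here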
Example #4
    def forward(self, x):
        data, lengths = x
        net_input = torch.nn.utils.rnn.pack_padded_sequence(
            data, lengths, batch_first=self.batch_first)
        _, h = self.gru(net_input)  # hidden state defaults to zero

        x = hidden2fc_input(self.gru, h)
        for fc in self.fc:
            x = fc(x)

        return x
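A hedged usage sketch for this forward, assuming `model` is an instance of a module like the hypothetical `GRUEmbedder` above with batch_first=False; the concrete shapes and lengths are illustrative only:

import torch

# padded batch of three sequences, sorted longest first as
# pack_padded_sequence expects with the default enforce_sorted=True
data = torch.zeros(10, 3, 16)        # (seq_len, batch, input_size)
lengths = [10, 7, 4]
embeddings = model((data, lengths))  # one output vector per sequence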
Example #5
    def stepwise_embeddings(self, x):
        if self.gru.bidirectional:
            sz = x.size()
            batch = torch.zeros(sz[0], sz[0], sz[2], dtype=torch.float, device='cuda' if self.use_cuda else 'cpu')
            lengths = np.array(np.arange(sz[0])[::-1] + 1)
            for i, length in enumerate(lengths):
                batch[:length, i, :] = x[:length, 0, :]
            batch = torch.nn.utils.rnn.pack_padded_sequence(batch, lengths)
            _, h = self.gru(batch)

            h_stepwise = hidden2fc_input(self.gru, h)
        else:
            # TODO: packed sequence handled here?
            h_stepwise, _ = self.gru(x)

        x = h_stepwise
        for fc in self.fc:
            x = fc(x)

        # the batch had to be ordered longest first for pack_padded_sequence, so flip the result back into time order
        if self.gru.bidirectional:
            x = torch.flip(x, dims=[0])

        return self.loss_delegate.mod_forward(x)
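Finally, a hedged sketch of how stepwise_embeddings would be called, assuming a module like the one sketched earlier that also provides the loss_delegate used in the last line; the shapes are illustrative only:

import torch

# a single sequence (batch size 1), as the prefix construction assumes
seq = torch.randn(20, 1, 16)               # (seq_len, batch=1, input_size)
per_step = model.stepwise_embeddings(seq)  # one output per prefix length 1..20,
                                           # in chronological order after the flip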