Code Example #1
File: neural_net.py  Project: xkuang/deep_parser
    def forward(self, sentence):
        """
        This function has three parts
        1. Look up the embeddings for the words in the sentence.
           These will be the inputs to the LSTM sequence model.
           NOTE: At this step, rather than being a list of embeddings,
           it should be a tensor of shape (len(sentence_idxs), 1, embedding_dim)
           The 1 is for the mini-batch size.  Don't get confused by it,
           just make it that shape.
        2. Now that you have your tensor of embeddings of shape (len(sentence_idxs), 1, embedding_dim),
           you can pass it through your LSTM.
           Refer to the PyTorch documentation to see what the outputs are.
        3. Convert the outputs into the correct return type, which is a list of
           embeddings of shape (1, embedding_dim)
        NOTE: Make sure you are reassigning self.hidden to the new hidden state!
        :param sentence A list of strs, the words of the sentence
        """
        assert self.word_to_ix is not None, "ERROR: Make sure to set word_to_ix on \
                the embedding lookup components"

        inp = utils.sequence_to_variable(sentence, self.word_to_ix,
                                         self.use_cuda)

        # STUDENT
        # 1. Embedding lookup, reshaped to (seq_len, batch=1, embedding_dim) for the LSTM
        embeds = self.word_embeddings(inp)
        embeds = embeds.view(len(sentence), 1, self.word_embedding_dim)
        # 2. Run the LSTM, reassigning self.hidden to the new hidden state
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)
        # 3. Flatten the output and chunk it into one (1, hidden_dim) row vector per word
        num = lstm_out.size(0)
        lstm_out = lstm_out.view(1, -1)
        out_l = list(torch.chunk(lstm_out, num, dim=1))
        return out_l
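Every complete example here threads self.hidden through the LSTM call, so that state has to exist before the first forward pass. Below is a minimal sketch of what an initial (h_0, c_0) pair looks like for a single-layer, batch-size-1 LSTM; hidden_dim = 64 is an assumed value, and the ag alias mirrors the torch.autograd import these snippets use. It is an illustration, not code from either project.

import torch
import torch.autograd as ag

# Hypothetical initial hidden state: (h_0, c_0), each of shape
# (num_layers=1, batch=1, hidden_dim), filled with zeros.
hidden_dim = 64
hidden = (ag.Variable(torch.zeros(1, 1, hidden_dim)),
          ag.Variable(torch.zeros(1, 1, hidden_dim)))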
Code Example #2
    def forward(self, sentence):
        """
        This function has three parts
        1. Look up the embeddings for the words in the sentence.
           These will be the inputs to the LSTM sequence model.
           NOTE: At this step, rather than being a list of embeddings,
           it should be a tensor of shape (len(sentence_idxs), 1, embedding_dim)
           The 1 is for the mini-batch size.  Don't get confused by it,
           just make it that shape.
        2. Now that you have your tensor of embeddings of shape (len(sentence_idxs), 1, embedding_dim),
           you can pass it through your LSTM.
           Refer to the PyTorch documentation to see what the outputs are.
        3. Convert the outputs into the correct return type, which is a list of
           embeddings of shape (1, embedding_dim)
        NOTE: Make sure you are reassigning self.hidden to the new hidden state!
        :param sentence A list of strs, the words of the sentence
        """
        assert self.word_to_ix is not None, "ERROR: Make sure to set word_to_ix on \
                the embedding lookup components"

        inp = utils.sequence_to_variable(sentence, self.word_to_ix,
                                         self.use_cuda)

        # STUDENT
        # 1. Embedding lookup, reshaped to (seq_len, batch=1, embedding_dim) as the docstring asks
        wordEmbedding = self.word_embeddings(inp)
        wordEmbedding = wordEmbedding.view(len(sentence), 1, -1)
        # 2. Run the LSTM and keep the new hidden state
        output, hidden = self.lstm(wordEmbedding, self.hidden)
        self.hidden = hidden
        # 3. Return one (1, hidden_dim) row vector per word
        embeddings = []
        for idx in range(len(sentence)):
            embeddings.append(output[idx].view(1, -1))
        return embeddings
Code Example #3
    def forward(self, sentence):
        inp = utils.sequence_to_variable(sentence, self.word_to_ix,
                                         self.use_cuda)

        idxs = [self.word_to_ix[w] for w in sentence]  # a list, so it can be wrapped in a LongTensor (map() returns an iterator in Python 3)
        tensor = torch.LongTensor(idxs)
        inputs = ag.Variable(tensor)
        embeds = self.word_embeddings(inputs)

        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1),
                                          self.hidden)
        return [i for i in lstm_out]
Code Example #4
File: neural_net.py  Project: cedebrun/gt-nlp-class
 def forward(self, sentence):
     """
     :param sentence A list of strings, the text of the sentence
     :return A list of autograd.Variables, where list[i] is the
         embedding of word i in the sentence.
         NOTE: the Variables returned should be row vectors, that
             is, of shape (1, embedding_dim)
     """
     inp = utils.sequence_to_variable(sentence, self.word_to_ix, self.use_cuda)
     embeds = [] # store each Variable in here
     # STUDENT
     # Look up each word's embedding and collect the row vectors
     for i in inp:
         embeds.append(self.word_embeddings(i))
     # END STUDENT
     return embeds
Code Example #5
 def forward(self, sentence):
     """
     :param sentence A list of strings, the text of the sentence
     :return A list of autograd.Variables, where list[i] is the
         embedding of word i in the sentence.
         NOTE: the Variables returned should be row vectors, that
             is, of shape (1, embedding_dim)
     """
     inp = utils.sequence_to_variable(sentence, self.word_to_ix, self.use_cuda)
     embeds = [] # store each Variable in here
     for i in inp:
         embeds.append(self.word_embeddings(i))
     return embeds
Code Example #6
File: neural_net.py  Project: xkuang/deep_parser
    def forward(self, sentence):
        """
        :param sentence A list of strings, the text of the sentence
        :return A list of autograd.Variables, where list[i] is the
            embedding of word i in the sentence.
            NOTE: the Variables returned should be row vectors, that
                is, of shape (1, embedding_dim)
        """
        inp = utils.sequence_to_variable(sentence, self.word_to_ix,
                                         self.use_cuda)

        # STUDENT
        # Batched embedding lookup, then split into one (1, embedding_dim) row per word
        embeds = self.word_embeddings(inp)
        embeds = list(torch.chunk(embeds, embeds.size(0)))
        # END STUDENT
        return embeds
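A quick, self-contained check of the chunking trick used above; the tensor here is random data with assumed sizes (7 words, embedding_dim = 50), just to show the resulting shapes.

import torch

embeds = torch.randn(7, 50)                      # 7 words, embedding_dim = 50
rows = list(torch.chunk(embeds, embeds.size(0)))  # split along dim 0 into 7 pieces
print(len(rows), rows[0].shape)                   # 7 torch.Size([1, 50])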
Code Example #7
File: neural_net.py  Project: cedebrun/gt-nlp-class
 def forward(self, sentence):
     """
     This function has three parts
     1. Look up the embeddings for the words in the sentence.
        These will be the inputs to the LSTM sequence model.
        NOTE: At this step, rather than being a list of embeddings,
        it should be a tensor of shape (len(sentence_idxs), 1, embedding_dim)
        The 1 is for the mini-batch size.  Don't get confused by it,
        just make it that shape.
     2. Now that you have your tensor of embeddings of shape (len(sentence_idxs), 1, embedding_dim),
        you can pass it through your LSTM.
        Refer to the PyTorch documentation to see what the outputs are.
     3. Convert the outputs into the correct return type, which is a list of
        embeddings of shape (1, embedding_dim)
     NOTE: Make sure you are reassigning self.hidden to the new hidden state!
     :param sentence A list of strs, the words of the sentence
     """
     assert self.word_to_ix is not None, "ERROR: Make sure to set word_to_ix on \
             the embedding lookup components"
     inp = utils.sequence_to_variable(sentence, self.word_to_ix, self.use_cuda)
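Example #7 stops after building inp; the rest of the body is not shown. Below is a minimal sketch of how the method could continue, assuming the same attributes used in Examples #1 and #2 (self.word_embeddings, self.lstm, self.hidden). It is an illustration only, not code from the cedebrun/gt-nlp-class repository.

     # (continuation sketch, not from the project)
     # 1. Embedding lookup, reshaped to (len(sentence), 1, embedding_dim)
     embeds = self.word_embeddings(inp).view(len(sentence), 1, -1)
     # 2. Run the LSTM, reassigning self.hidden to the new hidden state
     lstm_out, self.hidden = self.lstm(embeds, self.hidden)
     # 3. One (1, hidden_dim) row vector per word
     return [out.view(1, -1) for out in lstm_out]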
Code Example #8
    def forward(self, sentence):
        """
        This function has three parts
        1. Look up the embeddings for the words in the sentence.
           These will be the inputs to the LSTM sequence model.
           NOTE: At this step, rather than being a list of embeddings,
           it should be a tensor of shape (len(sentence_idxs), 1, embedding_dim)
           The 1 is for the mini-batch size.  Don't get confused by it,
           just make it that shape.
        2. Now that you have your tensor of embeddings of shape (len(sentence_idxs), 1, embedding_dim),
           you can pass it through your LSTM.
           Refer to the PyTorch documentation to see what the outputs are.
        3. Convert the outputs into the correct return type, which is a list of
           embeddings of shape (1, embedding_dim)
        NOTE: Make sure you are reassigning self.hidden to the new hidden state!
        :param sentence A list of strs, the words of the sentence
        """
        assert self.word_to_ix is not None, "ERROR: Make sure to set word_to_ix on \
                the embedding lookup components"

        inp = utils.sequence_to_variable(sentence, self.word_to_ix,
                                         self.use_cuda)

        # Pre-allocate a (seq_len, 1, embedding_dim) tensor and fill it word by word
        embeds = ag.Variable(
            torch.FloatTensor(len(inp), 1, self.word_embedding_dim))

        for i in range(len(inp)):
            embed = self.word_embeddings(inp[i])
            embeds[i, 0, :] = embed

        # Run the LSTM, reassigning self.hidden, then return one (1, hidden_dim) row per word
        out, self.hidden = self.lstm(embeds, self.hidden)
        return [row for row in out]
Code Example #9
 def forward(self, sentence):
     inp = utils.sequence_to_variable(sentence, self.word_to_ix)
     embeds = []
     for v in inp:
         embeds.append(self.word_embeddings(v))
     return embeds
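For reference, here is a distilled, stand-alone sketch of the overall pattern these examples implement (embedding lookup, reshape to (seq_len, 1, embedding_dim), LSTM, list of (1, hidden_dim) rows). It is written against current PyTorch without the old Variable wrapper, and the vocabulary and sizes are made up for illustration; they are not taken from either project.

import torch
import torch.nn as nn

# Hypothetical vocabulary and hyperparameters
word_to_ix = {"the": 0, "dog": 1, "barks": 2}
embedding_dim, hidden_dim = 50, 64
word_embeddings = nn.Embedding(len(word_to_ix), embedding_dim)
lstm = nn.LSTM(embedding_dim, hidden_dim)

sentence = ["the", "dog", "barks"]
inp = torch.tensor([word_to_ix[w] for w in sentence], dtype=torch.long)
hidden = (torch.zeros(1, 1, hidden_dim), torch.zeros(1, 1, hidden_dim))

embeds = word_embeddings(inp).view(len(sentence), 1, -1)  # (seq_len, batch=1, embedding_dim)
lstm_out, hidden = lstm(embeds, hidden)                   # (seq_len, 1, hidden_dim)
out = [row.view(1, -1) for row in lstm_out]               # list of (1, hidden_dim) row vectors
print(len(out), out[0].shape)                             # 3 torch.Size([1, 64])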