Code example #1
    def forward(self, track):
        for note in track:
            # Concatenate the current note with the running state along the
            # feature dimension (torch.cat expects a sequence of tensors)
            gate_input = torch.cat((note, self.state), dim=-1)

            # Forget gate: decide how much of the old state to keep
            forget = torch.sigmoid(self.forgetgate(gate_input))
            self.state = self.state * forget

            # Input gate and candidate values: write new information into the state
            inp = torch.sigmoid(self.inputgate(gate_input))
            candidates = torch.tanh(self.candidate_gen(note))
            self.state = self.state + inp * candidates

        output = F.relu(self.outputlayer1(self.state))
        output = F.relu(self.outputlayer2(output))
        return F.log_softmax(output, dim=-1)
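For context, a minimal sketch of a module this `forward` could belong to, assuming the gates are plain `nn.Linear` layers acting on a single state vector. The class name `NoteModel` and the dimension parameters are illustrative assumptions, not from the original:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class NoteModel(nn.Module):  # hypothetical name for illustration
        def __init__(self, note_dim, state_dim, num_classes):
            super().__init__()
            # Gates read the concatenated (note, state) vector
            self.forgetgate = nn.Linear(note_dim + state_dim, state_dim)
            self.inputgate = nn.Linear(note_dim + state_dim, state_dim)
            # Candidate values are generated from the note alone
            self.candidate_gen = nn.Linear(note_dim, state_dim)
            self.outputlayer1 = nn.Linear(state_dim, state_dim)
            self.outputlayer2 = nn.Linear(state_dim, num_classes)
            # Running cell state, reset externally between tracks
            self.state = torch.zeros(state_dim)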
Code example #2
    def forward(self, sentence):
        concatenated_output_state = []
        word_indices = []

        for word in sentence:
            # Refresh the hidden state, detaching it from the earlier sequence
            self.char_hidden_state = self.init_hidden()

            # word[1] has already been wrapped as a torch.LongTensor of
            # character indices; use it to index into the lookup table
            character_indices = word[1]
            character_level_embeddings = self.character_embeddings(
                character_indices)

            # Run the character-level LSTM over this word's characters;
            # the sequence length is the number of characters, not len(sentence)
            output_lstm, self.char_hidden_state = self.character_lstm(
                character_level_embeddings.view(len(character_indices), 1, -1),
                self.char_hidden_state)

            # Keep only the output for the final character as the word's
            # character-level representation
            concatenated_output_state.append(output_lstm[-1])
            word_indices.append(word[0])

        # Stack the per-word representations into a
        # (len(sentence), 1, char_hidden_dim) tensor
        concatenated_output_state = torch.stack(concatenated_output_state)

        # Look up the word embeddings, then reshape the result; the view must
        # be applied to the embeddings, not to the index tensor
        word_embeddings = self.word_embeddings(
            torch.tensor(word_indices,
                         dtype=torch.long)).view(len(sentence), 1, -1)

        # Concatenate the two representations along their last axis
        concatenated_characters_and_words = torch.cat(
            (word_embeddings, concatenated_output_state), dim=-1)

        lstm_output_state, self.hidden = self.word_lstm(
            concatenated_characters_and_words, self.hidden)

        # A Linear layer mapping from the LSTM's hidden space to tag scores
        tag_space = self.hidden2tag(
            lstm_output_state.view(len(sentence), -1))

        # Softmax along the tag dimension; log_softmax is required for NLLLoss
        tag_scores = F.log_softmax(tag_space, dim=1)

        return tag_scores
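For completeness, a minimal sketch of a constructor and `init_hidden` consistent with the attributes this `forward` uses. The class name, the `CHAR_HIDDEN_DIM` and `HIDDEN_DIM` constants, and the concrete sizes are assumptions for illustration:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    EMBEDDING_DIM = 32    # assumed; the original only references the name
    CHAR_HIDDEN_DIM = 16  # assumed
    HIDDEN_DIM = 64       # assumed

    class CharWordTagger(nn.Module):  # hypothetical name for illustration
        def __init__(self, word_vocab_size, char_vocab_size, tagset_size):
            super().__init__()
            self.word_embeddings = nn.Embedding(word_vocab_size, EMBEDDING_DIM)
            self.character_embeddings = nn.Embedding(char_vocab_size, EMBEDDING_DIM)
            self.character_lstm = nn.LSTM(EMBEDDING_DIM, CHAR_HIDDEN_DIM)
            # The word LSTM consumes a word embedding concatenated with the
            # word's character-level representation
            self.word_lstm = nn.LSTM(EMBEDDING_DIM + CHAR_HIDDEN_DIM, HIDDEN_DIM)
            self.hidden2tag = nn.Linear(HIDDEN_DIM, tagset_size)
            self.char_hidden_state = self.init_hidden()
            self.hidden = self.init_hidden(HIDDEN_DIM)

        def init_hidden(self, dim=CHAR_HIDDEN_DIM):
            # Fresh (h_0, c_0) pair for a single-layer LSTM with batch size 1
            return (torch.zeros(1, 1, dim), torch.zeros(1, 1, dim))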
Code example #3
	def forward(self, x):
		"""Performs generator step on input"""

		return F.log_softmax(self.proj(x), dim=-1)
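This is the final projection step of a sequence model; it resembles the `Generator` module in the Annotated Transformer. A minimal sketch of a surrounding class, assuming `proj` is a linear map from the model dimension to the vocabulary (the parameter names `d_model` and `vocab_size` are illustrative):

	import torch.nn as nn
	import torch.nn.functional as F

	class Generator(nn.Module):
		def __init__(self, d_model, vocab_size):
			super().__init__()
			# Projects the model's hidden dimension onto the vocabulary
			self.proj = nn.Linear(d_model, vocab_size)

		def forward(self, x):
			"""Performs generator step on input"""
			# log_softmax over the vocabulary axis pairs with NLL-style losses
			return F.log_softmax(self.proj(x), dim=-1)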