Example #1
    def forward(self, head_embed, modifier_embed):
        """
        Do the next LSTM step, and return the hidden state as the new
        embedding for the reduction.

        Note that PyTorch's LSTM wants its input to be a tensor with axis semantics
        (seq_len, batch_size, input_dimensionality), but we are not minibatching (so batch_size=1)
        and seq_len=1 since we are only doing one timestep.

        NOTE: use utils.concat_and_flatten() like in the MLP Combiner
        NOTE: Make sure the tensor you hand to your LSTM is the size it wants:
            (seq_len, batch_size, embedding_dim), which in this case, is (1, 1, embedding_dim)
        NOTE: If you add more layers to the LSTM (more than 1), your code may break.
            To fix it, look at the value of self.hidden whenever you have more layers.

        :param head_embed Embedding of the head word
        :param modifier_embed Embedding of the modifier
        """

        x = utils.concat_and_flatten([head_embed,
                                      modifier_embed]).view(1, 1, -1)

        # Run one LSTM step; the LSTM returns (output, (h_n, c_n))
        out, self.hidden = self.mlplstm(x, self.hidden)

        # self.hidden[0] is h_n of shape (num_layers, 1, hidden_dim);
        # index layer 0 to get the (1, hidden_dim) row vector
        return self.hidden[0][0]
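
The docstring's warning about extra layers comes down to how PyTorch stores LSTM state: self.hidden is an (h, c) tuple whose tensors have a leading num_layers axis, which is exactly why the code above indexes self.hidden[0][0]. A minimal sketch of how that state might be initialized, assuming the module stores num_layers and hidden_dim (the init_hidden name is hypothetical, not from the original):

    import torch
    import torch.autograd as autograd

    def init_hidden(num_layers, hidden_dim):
        # (h_0, c_0), each of shape (num_layers, batch_size=1, hidden_dim).
        # With num_layers > 1, h_0 holds one hidden state per layer, so
        # self.hidden[0][0] picks out only the first layer's state.
        return (autograd.Variable(torch.zeros(num_layers, 1, hidden_dim)),
                autograd.Variable(torch.zeros(num_layers, 1, hidden_dim)))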
Example #2
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        # STUDENT
        # Combine the features into one row vector, then let the network
        # (assumed to end in a LogSoftmax) produce the (1, 3) log probabilities
        combined = utils.concat_and_flatten(inputs)
        return self.model(combined)
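
Every example here leans on utils.concat_and_flatten, whose source is not shown. Its contract can be inferred from the docstrings: take a list of embeddings and produce one big row vector. A hedged sketch of that assumed behavior (this implementation is a guess, not the original utility):

    import torch

    def concat_and_flatten(items):
        # Concatenate a list of (1, d_i) row vectors along the feature
        # axis into a single (1, sum(d_i)) row vector
        return torch.cat(items, dim=1).view(1, -1)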
Example #3
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        # STUDENT
        combined = utils.concat_and_flatten([head_embed, modifier_embed])
        hidden = F.tanh(self.firstLayer(combined))
        return self.secondLayer(hidden)
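
Examples 3, 5, 7, and 14 all implement the same two-layer tanh MLP combiner; only the attribute names differ. A sketch of the constructor this forward assumes, with the class name and layer sizes as assumptions (the input is the concatenation of two embeddings, hence width 2 * embedding_dim):

    import torch.nn as nn

    class MLPCombiner(nn.Module):  # class name is an assumption
        def __init__(self, embedding_dim):
            super(MLPCombiner, self).__init__()
            # 2 * embedding_dim in (head + modifier), embedding_dim out
            self.firstLayer = nn.Linear(2 * embedding_dim, embedding_dim)
            self.secondLayer = nn.Linear(embedding_dim, embedding_dim)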
Example #4
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        # STUDENT
        combined = utils.concat_and_flatten([head_embed, modifier_embed])
        # self.model maps the concatenated pair back to a single embedding
        # (a row vector), not to log probabilities
        return self.model(combined)
Example #5
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        # STUDENT
        combined = utils.concat_and_flatten([head_embed, modifier_embed])
        hidden = F.tanh(self.linear1(combined))
        return self.linear2(hidden)
Example #6
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        x = utils.concat_and_flatten(inputs)

        out = self.afl1(x)
        out = self.relu(out)
        out = self.afl2(out)
        # dim=1: normalize over the three action scores in the row vector
        return F.log_softmax(out, dim=1)
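
Examples 2, 6, 8, 9, 10, 13, and 15 are all variants of the same action chooser. A constructor consistent with Example 6's forward, where only afl1, afl2, and relu are names from the example and everything else (class name, hidden width) is an assumption:

    import torch.nn as nn

    class ActionChooser(nn.Module):  # class name is an assumption
        def __init__(self, input_dim, hidden_dim):
            super(ActionChooser, self).__init__()
            self.afl1 = nn.Linear(input_dim, hidden_dim)
            self.relu = nn.ReLU()
            # 3 outputs, one score per parser action, log-softmaxed in forward
            self.afl2 = nn.Linear(hidden_dim, 3)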
Example #7
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        # STUDENT
        inp_embed = utils.concat_and_flatten([head_embed, modifier_embed])
        hidden = self.fc1(inp_embed)
        hidden = self.tanh(hidden)
        output = self.fc2(hidden)
        return output
Example #8
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        # STUDENT
        input_flat = utils.concat_and_flatten(inputs)
        hidden = F.relu(self.first(input_flat))
        scores = self.second(hidden)
        # dim=1: normalize over the action scores in the row vector
        return F.log_softmax(scores, dim=1)
Example #9
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        # STUDENT
        wordembeds = utils.concat_and_flatten(inputs)
        output = F.relu(self.linear1(wordembeds))
        log_prob = F.log_softmax(self.linear2(output), dim=1)
        return log_prob
        # END STUDENT
Example #10
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        # STUDENT
        inp = utils.concat_and_flatten(inputs)
        hidden = self.lin1(inp)
        hidden = self.relu(hidden)
        output = self.lin2(hidden)
        # self.softmax must be nn.LogSoftmax(dim=1) so that this returns
        # log probabilities, as the contract above requires
        output = self.softmax(output)
        return output
Example #11
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        x = utils.concat_and_flatten([head_embed, modifier_embed])
        out = self.mlpl1(x)
        out = self.tanh(out)
        out = self.mlpl2(out)
        return out
Example #12
    def forward(self, head_embed, modifier_embed):
        """
        HINT: use utils.concat_and_flatten() to combine head_embed and modifier_embed
        into a single tensor.

        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        # STUDENT
        flat_input = utils.concat_and_flatten([head_embed, modifier_embed])

        # No inline nonlinearity: the activation is presumably stored as one
        # of the layers itself (e.g. layer2 is a Tanh module)
        out = self.layer1(flat_input)
        out = self.layer2(out)
        out = self.layer3(out)

        return out
Example #13
    def forward(self, inputs):
        """
        NOTE: Use utils.concat_and_flatten to combine all the features into one big
        row vector.

        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
            (it is a row vector, with an entry for each action)
        """
        # STUDENT
        flat_input = utils.concat_and_flatten(inputs)

        # As in Example 12, the activation and the final LogSoftmax are
        # presumably stored as layers themselves (see the sketch below)
        out = self.layer1(flat_input)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)

        return out
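
Example 13 shows no inline nonlinearity or log-softmax, so both are presumably modules stored as layers. A hedged reconstruction consistent with that forward (entirely an assumption; the original attribute definitions are not shown):

    import torch.nn as nn

    class FourLayerChooser(nn.Module):  # hypothetical reconstruction
        def __init__(self, input_dim, hidden_dim):
            super(FourLayerChooser, self).__init__()
            self.layer1 = nn.Linear(input_dim, hidden_dim)
            self.layer2 = nn.ReLU()
            self.layer3 = nn.Linear(hidden_dim, 3)
            self.layer4 = nn.LogSoftmax(dim=1)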
Example #14
    def forward(self, head_embed, modifier_embed):
        """
        :param head_embed The embedding of the head in the reduction
        :param modifier_embed The embedding of the modifier in the reduction
        :return The embedding of the combination as a row vector
        """
        embeds = utils.concat_and_flatten([head_embed, modifier_embed])
        out = F.tanh(self.linear1(embeds))
        return self.linear2(out)
Example #15
    def forward(self, inputs):
        """
        :param inputs A list of autograd.Variables, which are all of the features we will use
        :return a Variable which is the log probabilities of the actions, of shape (1, 3)
        """
        embeds = utils.concat_and_flatten(inputs)
        out = F.relu(self.linear1(embeds))
        return F.log_softmax(self.linear2(out), dim=1)
Example #16
    def forward(self, head_embed, modifier_embed):
        """
        :param head_embed Embedding of the head word
        :param modifier_embed Embedding of the modifier
        :return The combined embedding as a row vector
        """
        embeds = utils.concat_and_flatten([head_embed, modifier_embed])
        # One LSTM timestep; lstm_out[0] is the (1, hidden_dim) row vector
        lstm_out, self.hidden = self.lstm(embeds.view(1, 1, -1), self.hidden)
        return lstm_out[0]
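
Because Examples 1 and 16 keep recurrent state in self.hidden, that state must be reset before each new sentence, or one parse's reductions will leak into the next. A hedged usage sketch (combiner, embedding_dim, and init_hidden are hypothetical names, following the sketch after Example 1):

    # Reset the recurrent state, then fold a head/modifier pair
    combiner.hidden = init_hidden(num_layers=1, hidden_dim=embedding_dim)
    combined = combiner(head_embed, modifier_embed)  # a (1, hidden_dim) row vector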