Example #1
    def forward(self, x):
        """
        Calculate the output.

        Parameters
        ----------
        x : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        output: ``torch.FloatTensor``.   
            The output of RNNs.
        """
        if self.droprate > 0:
            # Apply dropout to the input before it enters the RNN.
            new_x = F.dropout(x, p=self.droprate, training=self.training)
        else:
            new_x = x

        out, new_hidden = self.layer(new_x, self.hidden_state)

        # Re-wrap (detach) the hidden state so gradients do not flow across batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        out = out.contiguous()

        # Dense connection: concatenate the input with the RNN output.
        return torch.cat([x, out], 2)
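For orientation, the following is a minimal, self-contained sketch of a module whose forward matches Example #1, with utils.repackage_hidden inlined as a plain detach (its usual role). The class name DenseRNN, the LSTM inside it, and the constructor arguments are illustrative assumptions rather than the actual class these examples come from; the point is that the dense connection grows the feature dimension from input_dim to input_dim + hidden_dim.

# A minimal sketch, not the repository's actual class.
import torch
import torch.nn as nn
import torch.nn.functional as F


def repackage_hidden(h):
    # Detach hidden states from the graph (the usual role of utils.repackage_hidden).
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)


class DenseRNN(nn.Module):
    def __init__(self, input_dim, hidden_dim, droprate=0.5):
        super().__init__()
        self.layer = nn.LSTM(input_dim, hidden_dim)
        self.droprate = droprate
        self.hidden_state = None

    def forward(self, x):
        if self.droprate > 0:
            new_x = F.dropout(x, p=self.droprate, training=self.training)
        else:
            new_x = x
        out, new_hidden = self.layer(new_x, self.hidden_state)
        self.hidden_state = repackage_hidden(new_hidden)
        out = out.contiguous()
        # Dense connection: output width is input_dim + hidden_dim.
        return torch.cat([x, out], 2)


rnn = DenseRNN(input_dim=100, hidden_dim=50)
x = torch.randn(35, 16, 100)   # (seq_len, batch_size, input_dim)
y = rnn(x)
print(y.shape)                 # torch.Size([35, 16, 150])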
Example #2
    def forward(self, x):
        out, new_hidden = self.layer(x, self.hidden_state)

        # Re-wrap (detach) the hidden state so gradients do not flow across batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        if self.droprate > 0:
            # Apply dropout to the RNN output.
            out = F.dropout(out, p=self.droprate, training=self.training)

        return out
Example #3
    def forward(self, x):

        if self.droprate > 0:
            # Dropout on the input before the RNN.
            new_x = F.dropout(x, p=self.droprate, training=self.training)
        else:
            new_x = x

        out, new_hidden = self.layer(new_x, self.hidden_state)

        # Detach the hidden state between batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        out = out.contiguous()

        # Dense connection: concatenate the input with the RNN output.
        return torch.cat([x, out], 2)
Example #4
    def forward(self, x):

        out = 0
        # n_w = F.softmax(self.unit_weight, dim=0)
        # Sum the outputs of all RNN units; x is a list with one input per unit.
        for ind in range(self.unit_number):
            nout, new_hidden = self.unit_list[ind](x[ind],
                                                   self.hidden_list[ind])
            # Detach each unit's hidden state between batches.
            self.hidden_list[ind] = utils.repackage_hidden(new_hidden)
            out = out + nout
            # out = out + n_w[ind] * self.unit_number * nout

        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)

        # Append the combined output to the input list and return the list.
        x.append(out)

        return x
Example #5
    def forward(self, x, p_out):

        if self.droprate > 0:
            # Dropout on the input before the RNN.
            new_x = F.dropout(x, p=self.droprate, training=self.training)
        else:
            new_x = x

        out, new_hidden = self.layer(new_x, self.hidden_state)

        # Detach the hidden state between batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        out = out.contiguous()

        if self.training and random.uniform(0, 1) < self.layer_drop:
            # Layer drop: replace this layer's dense contribution with zeros.
            deep_out = torch.autograd.Variable(
                torch.zeros(x.size(0), x.size(1), self.increase_rate)).cuda()
        else:
            deep_out = out

        # o_out accumulates undropped outputs for the softmax;
        # d_out feeds the next layer through the dense connection.
        o_out = torch.cat([p_out, out], 2)
        d_out = torch.cat([x, deep_out], 2)
        return d_out, o_out
Example #6
    def forward(self, x):
        """
        Calculate the output.

        Parameters
        ----------
        x : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        output: ``torch.FloatTensor``.   
            The output of RNNs.
        """
        out, new_hidden = self.layer(x, self.hidden_state)

        # Re-wrap (detach) the hidden state so gradients do not flow across batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        if self.droprate > 0:
            # Apply dropout to the RNN output.
            out = F.dropout(out, p=self.droprate, training=self.training)

        return out
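Examples #2 and #6 keep the recurrent state on the module and re-wrap it after every call. Below is a small sketch of that pattern with a plain nn.LSTM, again assuming utils.repackage_hidden simply detaches the hidden state so back-propagation is truncated at segment boundaries.

# A minimal sketch of the state-carrying pattern, assuming repackage_hidden = detach.
import torch
import torch.nn as nn


def repackage_hidden(h):
    # Detach so backprop is truncated at segment boundaries.
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)


lstm = nn.LSTM(input_size=10, hidden_size=20)
hidden = None
for step in range(3):                      # three consecutive segments
    x = torch.randn(5, 4, 10)              # (seq_len, batch_size, input_dim)
    out, hidden = lstm(x, hidden)
    hidden = repackage_hidden(hidden)      # keep the values, drop the graph
print(out.shape)                           # torch.Size([5, 4, 20])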
Example #7
    def forward(self, x, p_out):
        """
        Calculate the output.

        Parameters
        ----------
        x : ``torch.LongTensor``, required.
            the input tensor, of shape (seq_len, batch_size, input_dim).
        p_out : ``torch.LongTensor``, required.
            the final output tensor for the softmax, of shape (seq_len, batch_size, input_dim).

        Returns
        ----------
        out: ``torch.FloatTensor``.
            The undropped outputs of RNNs to the softmax.
        p_out: ``torch.FloatTensor``.
            The dropped outputs of RNNs to the next_layer.
        """
        if self.droprate > 0:
            # Dropout on the input before the RNN.
            new_x = F.dropout(x, p=self.droprate, training=self.training)
        else:
            new_x = x

        out, new_hidden = self.layer(new_x, self.hidden_state)

        # Detach the hidden state between batches.
        self.hidden_state = utils.repackage_hidden(new_hidden)

        out = out.contiguous()

        if self.training and random.uniform(0, 1) < self.layer_drop:
            # Layer drop: replace this layer's dense contribution with zeros.
            deep_out = torch.autograd.Variable(
                torch.zeros(x.size(0), x.size(1), self.increase_rate)).cuda()
        else:
            deep_out = out

        # o_out accumulates undropped outputs for the softmax;
        # d_out feeds the next layer through the dense connection.
        o_out = torch.cat([p_out, out], 2)
        d_out = torch.cat([x, deep_out], 2)
        return d_out, o_out
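Examples #5 and #7 add layer drop: during training, with probability layer_drop, the layer's dense contribution is replaced by zeros of width increase_rate. Below is a small CPU-only sketch of just that branch (Variable and .cuda() omitted), under the assumption that increase_rate equals the RNN's hidden size, so d_out has the same width whether or not the layer is dropped.

# A minimal sketch of the layer-drop branch; shapes are illustrative assumptions.
import random
import torch

seq_len, batch_size, input_dim, increase_rate = 35, 16, 100, 50
layer_drop, training = 0.5, True

x = torch.randn(seq_len, batch_size, input_dim)
out = torch.randn(seq_len, batch_size, increase_rate)   # stands in for the RNN output

if training and random.uniform(0, 1) < layer_drop:
    # With probability layer_drop, this layer's dense contribution is zeroed out.
    deep_out = torch.zeros(seq_len, batch_size, increase_rate)
else:
    deep_out = out

d_out = torch.cat([x, deep_out], 2)
print(d_out.shape)   # torch.Size([35, 16, 150]) either way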