Example #1
            x = self.resampler(x)
        # Run all but the final RNN layer, concatenating each layer's output
        # onto its input (dense connections)
        for rnn_layer_ind in range(len(self.rnn_layers) - 1):
            x = torch.cat([x, self.rnn_layers[rnn_layer_ind](x)],
                          dim=-1)  # B x L x (Hx + Hout)
        # Final rnn
        x = self.rnn_layers[-1](x)
        # Apply the mask
        x = L.unpad_sequences(x, seq_lens)
        # Run the pooling and flatten
        x = self.pool(x)
        x = L.flatten(x)  # B x k*H

        for fc_layer in self.fc_layers:
            x = fc_layer(x)
        self.loss_in = x  # B x 6
        return self.loss_in
        # return F.sigmoid(self.loss_in)

    def reset_parameters(self):
        for layer in self.rnn_layers:
            layer.reset_parameters()
        self.pool.reset_parameters()
        for layer in self.fc_layers:
            layer.reset_parameters()
        if self.resample:
            self.resampler.reset_parameters()


registry.register_model("dense-rnn-emb", DenseRNNEmb)
Example #2
        # Apply the mask
        if self.use_pool:
            x = L.unpad_sequences(x, seq_lens)
        # Do the pooling
        if self.use_multi_pool:
            x = self.concat([pool_i(x) for pool_i in self.pool])
        else:
            x = self.pool(x)
        x = L.flatten(x)  # B x k*H
        for fc_layer in self.fc_layers:
            x = fc_layer(x)
        self.loss_in = x  # B x 6
        # return F.sigmoid(self.loss_in)
        return self.loss_in

    def reset_parameters(self):
        for layer in self.rnn_layers:
            layer.reset_parameters()
        if self.use_multi_pool:
            for pool in self.pool:
                pool.reset_parameters()
        else:
            self.pool.reset_parameters()
        for layer in self.fc_layers:
            layer.reset_parameters()
        if self.resample:
            self.resampler.reset_parameters()


registry.register_model("rnn-emb", RNNEmb)
Example #3
    def cast_target_to_torch(self, y, volatile=False):
        return Variable(torch.from_numpy(y).cuda().float(), volatile=volatile)

    def forward(self, x):
        # x is B x Si x Wj x E
        # print([[tuple(sent.size()) for sent in sample] for sample in x])
        # We run on each sentence first
        # Each sample is Si x Wj x E
        word_outs = x
        for word_layer in self.word_layers:
            word_outs = [word_layer(sample) for sample in word_outs]
        word_outs = [L.flatten(self.word_pool(sample)) for sample in word_outs]
        # print([tuple(sent.size()) for sent in word_outs])
        # Encode each sample's sequence of sentence vectors
        sent_outs = word_outs
        for sent_layer in self.sent_layers:
            sent_outs = sent_layer(sent_outs)
        sent_outs = L.flatten(self.sent_pool(sent_outs))
        # print(tuple(sent_outs.size()))
        # Run the fc layer if we have one
        x = sent_outs
        for fc_layer in self.fc_layers:
            x = fc_layer(x)
        self.loss_in = x  # B x 6
        return self.loss_in
        # return F.sigmoid(self.loss_in)


registry.register_model("han", HAN)
Example #4
        x = [self.dropout1(sample.unsqueeze(0)).squeeze(0) for sample in x]

        x = [
            self.m2(F.relu(self.conv2(sample.unsqueeze(0)))).squeeze(0)
            for sample in x
        ]  # B x 256 x Li-2
        # Do the batchnorm
        if self.batchnorm:
            x, lens = pad_torch_embedded_sequences(x, length_last=True)
            x = self.bn2(x)
            x = unpad_torch_embedded_sequences(x, lens, length_last=True)

        x = [self.dropout2(sample.unsqueeze(0)).squeeze(0) for sample in x]
        # Make each sample time-major for the RNN: (Li-2) x 256
        x = [sample.transpose(0, 1) for sample in x]

        x = self.rnn_module(x)
        x = J.flatten(x)  # B x 256

        # Run the fc layer if we have one
        if self.has_fc:
            x = self.dropout_fc(x)
            x = F.relu(self.fc_layer(x))
            x = self.bn_fc(x)

        x = self.dropout_fc_eval(x)
        self.loss_in = self.fc_eval(x)  # B x 6
        return F.sigmoid(self.loss_in)


registry.register_model("cnn-rnn-emb", CNNRNNEmb)
Example #5
            x = rnn_layer(x)  # B x Li x H

        # Apply the attention
        x = self.att(x, seq_lens=seq_lens)  # B x 6 x H

        preds = [None for _ in range(6)]
        # Run the toxic output
        preds[0] = self.fc_layers[0](x[:, 0])  # B x 1
        # Run the other
        for i in range(1, 6):
            preds[i] = self.fc_layers[i](torch.cat([x[:, 0], x[:, i]], dim=-1))

        # Combine them
        x = torch.cat(preds, dim=-1)

        self.loss_in = x  # B x 6
        return self.loss_in

    def reset_parameters(self):
        for layer in self.rnn_layers:
            layer.reset_parameters()
        self.att.reset_parameters()
        for layers in self.fc_layers:
            for layer in layers:
                layer.reset_parameters()
        if self.resample:
            self.resampler.reset_parameters()


registry.register_model("mha-rnn-emb", MHARNNEmb)
Example #6
        # Pad every sample to the longest length in the batch (at least min_len)
        seq_lens = J.LongTensor([max(len(sample), self.min_len) for sample in x])
        x = np.array([L.pad_numpy_to_length(sample, length=seq_lens.max())
                      for sample in x], dtype=int)
        # Embed the padded index matrix and return it with the true lengths
        return self.embeddings(Variable(J.from_numpy(x).long(), volatile=volatile)), seq_lens

    def cast_target_to_torch(self, y, volatile=False):
        return Variable(J.from_numpy(y).float(), volatile=volatile)

    def forward(self, inputs):
        x, seq_lens = inputs
        for block in self.blocks:
            x, seq_lens = block(x, seq_lens)

        x, _ = self.global_pool(x, seq_lens)

        x = L.flatten(x)  # B x F*k
        # Run the fc layer if we have one
        for fc_layer in self.fc_layers:
            x = fc_layer(x)
        self.loss_in = x
        return self.loss_in

    def reset_parameters(self):
        for block in self.blocks:
            block.reset_parameters()
        self.global_pool.reset_parameters()
        for layer in self.fc_layers:
            layer.reset_parameters()


registry.register_model("cnn-emb", CNNEmb)
registry.register_model("dpcnn", DPCNN)