Code example #1
0
File: cnn_emb.py  Project: abhmul/toxic-comments
    def forward(self, inputs):
        """Run the conv blocks, global pooling, and FC head.

        `inputs` is a `(tensor, sequence_lengths)` pair. The final FC
        output is stashed on `self.loss_in` and returned.
        """
        out, lengths = inputs
        # Each block transforms the features and updates the lengths.
        for blk in self.blocks:
            out, lengths = blk(out, lengths)

        # Global pooling collapses the variable-length dimension.
        out, _ = self.global_pool(out, lengths)
        out = L.flatten(out)  # B x F*k

        # Fully-connected head (may be empty).
        for fc in self.fc_layers:
            out = fc(out)
        self.loss_in = out
        return self.loss_in
Code example #2
0
File: rnn_emb.py  Project: abhmul/toxic-comments
    def legacy_forward(self, x):
        """Legacy path: optional resample, RNN stack, pool, flatten, FC head."""
        out = self.resampler(x) if self.resample else x
        # Stacked recurrent layers.
        for rnn in self.rnn_layers:
            out = rnn(out)  # B x Li x H
        out = self.pool(out)
        out = L.flatten(out)  # B x k*H

        # Fully-connected head.
        for fc in self.fc_layers:
            out = fc(out)
        self.loss_in = out  # B x 6
        return self.loss_in
Code example #3
0
File: cnn_emb.py  Project: abhmul/toxic-comments
    def forward(self, inputs):
        """Conv stack with per-layer pooling, then global pool and FC head.

        `inputs` is a `(tensor, sequence_lengths)` pair; the result is also
        stored on `self.loss_in`.
        """
        out, lengths = inputs
        # Alternate conv and pooling; both track the sequence lengths.
        for conv in self.conv_layers:
            out, lengths = conv(out, lengths)
            out, lengths = self.pool(out, lengths)

        # Global pooling over the variable-length dimension.
        out, lengths = self.global_pool(out, lengths)
        out = L.flatten(out)  # B x k*H

        # Fully-connected head.
        for fc in self.fc_layers:
            out = fc(out)
        self.loss_in = out
        return self.loss_in
Code example #4
0
    def forward(self, x):
        """Densely-connected RNN stack.

        Each non-final RNN layer's output is concatenated onto its input
        before feeding the next layer; the final layer's output is masked,
        pooled, flattened, and passed through the FC head. `x` is a
        `(tensor, sequence_lengths)` pair; result is stored on `self.loss_in`.
        """
        out, seq_lens = x
        # Apply the resampler if configured.
        if self.resample:
            out = self.resampler(out)
        # All but the last layer: append the layer output to its input.
        for rnn in self.rnn_layers[:-1]:
            out = torch.cat([out, rnn(out)], dim=-1)  # B x L x (Hx + Hout)
        # Final RNN layer produces the features to pool.
        out = self.rnn_layers[-1](out)
        # Mask away the padded timesteps before pooling.
        out = L.unpad_sequences(out, seq_lens)
        out = self.pool(out)
        out = L.flatten(out)  # B x k*H

        # Fully-connected head.
        for fc in self.fc_layers:
            out = fc(out)
        self.loss_in = out  # B x 6
        return self.loss_in
Code example #5
0
File: rnn_emb.py  Project: abhmul/toxic-comments
    def forward(self, x):
        """Resample (optionally), run the RNN stack, mask+pool, flatten, FC.

        Dispatches to `legacy_forward` when `self.legacy` is set. `x` is a
        `(tensor, sequence_lengths)` pair; the FC output is stored on
        `self.loss_in` and returned (raw logits, no sigmoid).
        """
        if self.legacy:
            return self.legacy_forward(x)
        out, seq_lens = x
        # Apply the resampler if configured.
        if self.resample:
            out = self.resampler(out)
        for rnn in self.rnn_layers:
            out = rnn(out)  # B x Li x H

        # NOTE(review): `use_pool` gates the *unpadding* step, not pooling —
        # pooling always runs below. Confirm the flag's intended meaning.
        if self.use_pool:
            out = L.unpad_sequences(out, seq_lens)
        # Pooling: either a single module or several concatenated.
        if self.use_multi_pool:
            out = self.concat([p(out) for p in self.pool])
        else:
            out = self.pool(out)
        out = L.flatten(out)  # B x k*H
        for fc in self.fc_layers:
            out = fc(out)
        self.loss_in = out  # B x 6
        return self.loss_in