Code example #1
 def forward(self, nl_tensor, nl_len_tensor, wombat_tensor=None):
     if self.use_selfatt:
         key_padding_mask = (nl_tensor == 0)
         en_inp = self.sembedding(nl_tensor).transpose(0, 1) * math.sqrt(self.norm_dim)
         if wombat_tensor is not None:
             en_inp += wombat_tensor
         en_out = self.encoder(en_inp, None, key_padding_mask).transpose(0, 1)
         features = torch.max_pool1d(en_out.transpose(1, -1), en_out.size(1)).squeeze(-1)
         en_score = self.scoring(features)
     else:
         device = nl_tensor.device
         # sort input tensors by length in descending order
         nl_tensor, nl_len_tensor, nl_ord_tensor, nl_recover_ord_tensor = self.sort_tensors(nl_tensor, nl_len_tensor)
         # en_inp = [batch, nl_len, nl_emb]
         en_inp = self.sembedding(nl_tensor)
         if wombat_tensor is not None:
             wombat_tensor = self.reorder_tensor(wombat_tensor, nl_ord_tensor, dim=0)
             en_inp += wombat_tensor
         # en_out = tensor(batch_size, seq_length, rnn_dim * num_directions)
         # en_hidden = (h_n,c_n) ---> h_n = tensor(num_layers *num_directions, batch_size, rnn_dim)
         en_out, en_hidden = self.encoder(en_inp, nl_len_tensor)
         if self.model_type == "cnn":
             features = torch.max_pool1d(en_out.transpose(1, -1), en_out.size(1)).squeeze(-1)
         else:
             features = self.encoder.get_last_hiddens(en_hidden)
         en_score = self.scoring(features)
         # recover the original order of outputs to compute loss
         en_score = self.reorder_tensor(en_score, nl_recover_ord_tensor, dim=0)
     return en_score
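
A note on the pooling idiom used above: torch.max_pool1d over the full sequence length is global max pooling over time. A minimal, self-contained sketch (tensor sizes are illustrative, not from the project):

    import torch

    en_out = torch.randn(8, 20, 256)  # (batch, seq_len, hidden)
    # move hidden to the channel dim, then pool over the whole sequence
    features = torch.max_pool1d(en_out.transpose(1, -1), en_out.size(1)).squeeze(-1)
    assert features.shape == (8, 256)
    # equivalent, and often clearer:
    assert torch.equal(features, en_out.max(dim=1).values)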
Code example #2
    def forward(self, inputs):
        text_raw_indices = inputs[0]
        aspect_indices = inputs[1]

        context_len = torch.sum(text_raw_indices != 0, dim=-1)
        aspect_len = torch.sum(aspect_indices != 0, dim=-1)
        context = self.embed(
            text_raw_indices)  # [batch, max_seq_len, embed_dim]
        aspect = self.embed(aspect_indices)  # [batch, max_seq_len, embed_dim]

        # Aspect Vector max pool over time
        aspect = torch.transpose(aspect, 1,
                                 2)  # [batch, embed_dim, max_asp_len]
        aspect = aspect.unsqueeze(3)  # [batch, embed_dim, max_asp_len, 1]
        aspect = F.relu(
            self.cnn_aspect(aspect))  # [batch, out_channels, max_asp_len, 1]
        aspect_v = torch.max_pool1d(torch.squeeze(aspect, 3),
                                    kernel_size=aspect.shape[2])
        aspect_v = aspect_v.unsqueeze(2)

        # Context Vector
        context = torch.transpose(context, 1,
                                  2)  # [batch, embed_dim, max_seq_len]
        context = context.unsqueeze(3)  # [batch, embed_dim, max_seq_len ,1]
        aspect_v = aspect_v.expand(-1, -1, context.shape[2], -1)
        x = [torch.tanh(conv(context)) for conv in self.cnn_context_x]
        y = [F.relu(conv(context) + aspect_v) for conv in self.cnn_context_y]
        res = [i * j for i, j in zip(x, y)]
        res = torch.cat(res, 1)
        res = torch.max_pool1d(torch.squeeze(res, 3), kernel_size=res.shape[2])
        res = res.squeeze(2)
        res = self.dropout(res)
        out = self.dense(res)
        return out
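
The multiplication of x and y above is a gated convolution: a tanh feature branch is scaled elementwise by an aspect-conditioned relu gate. A toy sketch of the mechanism (shapes and names are assumptions for illustration):

    import torch
    import torch.nn.functional as F

    feat_branch = torch.randn(8, 100, 40, 1)  # conv(context) for one kernel size
    gate_branch = torch.randn(8, 100, 40, 1)  # conv(context) + broadcast aspect vector
    gated = torch.tanh(feat_branch) * F.relu(gate_branch)  # same shape; the gate scales features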
Code example #3
    def forward(self, inputs):
        text_raw_indices = inputs[0]
        aspect_indices = inputs[1]

        context_len = torch.sum(text_raw_indices != 0,
                                dim=-1).float().to(self.opt.device)
        aspect_len = torch.sum(aspect_indices != 0,
                               dim=-1).float().to(self.opt.device)
        context = self.embed(
            text_raw_indices)  # [batch, max_seq_len, embed_dim]
        aspect = self.embed(aspect_indices)  # [batch, max_seq_len, embed_dim]

        # Aspect Vector
        aspect = torch.transpose(aspect, 1,
                                 2)  # [batch, embed_dim, max_asp_len]
        aspect = aspect.unsqueeze(3)  # [batch, embed_dim, max_asp_len, 1]
        aspect = [conv(aspect) for conv in self.cnn_aspect]

        # Aspect Pool
        aspect_pool = [
            torch.max_pool1d(torch.squeeze(a, 3), kernel_size=a.shape[2])
            for a in aspect
        ]

        # Context Vector
        context = torch.transpose(context, 1, 2)
        context_v = context.unsqueeze(3)
        context = [conv(context_v) for conv in self.cnn_context]
        context_ = [conv(context_v) for conv in self.cnn_context_]

        # Context Pool
        context_pool = [
            torch.max_pool1d(torch.squeeze(c, 3), kernel_size=c.shape[2])
            for c in context
        ]

        # Gating
        s1 = [
            torch.tanh(c) * torch.sigmoid(a)
            for c, a, c_ in zip(context, aspect, context_)
        ]
        # note: this gate uses the aspect features a for both branches
        s2 = [
            torch.tanh(a) * torch.sigmoid(a)
            for c, a, c_ in zip(context, aspect, context_)
        ]
        s1 = torch.cat(s1, 1)
        s2 = torch.cat(s2, 1)

        res1 = torch.max_pool1d(s1.squeeze(3),
                                kernel_size=s1.shape[2]).squeeze(2)
        res2 = torch.max_pool1d(s2.squeeze(3),
                                kernel_size=s2.shape[2]).squeeze(2)

        x = torch.cat((res1, res2), dim=-1)
        x = self.dropout(x)
        out = self.dense(x)
        return out
Code example #4
 def forward(self, x, lens):
     embed = self.dropout(self.emb(x)).unsqueeze(1)
     c1 = torch.relu(self.conv1(embed).squeeze(3))
     p1 = torch.max_pool1d(c1, c1.size()[2]).squeeze(2)
     c2 = torch.relu(self.conv2(embed).squeeze(3))
     p2 = torch.max_pool1d(c2, c2.size()[2]).squeeze(2)
     c3 = torch.relu(self.conv3(embed).squeeze(3))
     p3 = torch.max_pool1d(c3, c3.size()[2]).squeeze(2)
     pool = self.dropout(torch.cat((p1, p2, p3), 1))
     hidden = self.fc(pool)
     return self.softmax(hidden), self.log_softmax(hidden)
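
Each of the three branches above is the standard conv-then-max-over-time block. For reference, a minimal sketch of one branch with assumed sizes (100 filters of height 3 over 128-dim embeddings):

    import torch
    import torch.nn as nn

    conv1 = nn.Conv2d(1, 100, (3, 128))
    embed = torch.randn(8, 1, 40, 128)  # (batch, 1, seq_len, embed_dim)
    c1 = torch.relu(conv1(embed).squeeze(3))  # (batch, 100, seq_len - 3 + 1)
    p1 = torch.max_pool1d(c1, c1.size(2)).squeeze(2)  # (batch, 100)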
Code example #5
    def forward(self, inputs):
        text_raw_indices = inputs[0]
        aspect_indices = inputs[1]
        pos_indices = inputs[2]
        aspect_pos_indices = inputs[3]
        position_indices = inputs[4]

        # Inputs
        context = self.embed(
            text_raw_indices)  # Dimensions: [batch, max_seq_len, embed_dim]
        aspect = self.embed(
            aspect_indices)  # Dimensions: [batch, max_seq_len, embed_dim]
        position = self.position_embed(position_indices)

        # Part-of-speech (POS) tags
        pos_tags = self.pos_embed(pos_indices)
        aspect_pos_tags = self.pos_embed(aspect_pos_indices)

        # Concat POS Tags
        context = torch.cat((context, pos_tags, position), dim=-1)
        aspect = torch.cat((aspect, aspect_pos_tags), dim=-1)

        # Aspect
        aspect = torch.transpose(aspect, 1,
                                 2)  # [batch, embed_dim, max_asp_len]
        aspect_v = aspect.unsqueeze(3)  # [batch, embed_dim, max_asp_len, 1]
        aspect = [conv(aspect_v) for conv in self.cnn_aspect]

        # Context
        context = torch.transpose(context, 1, 2)
        context_v = context.unsqueeze(3)
        context = [conv(context_v) for conv in self.cnn_context]

        # Concatenate all kernel outputs
        aspect2context = torch.cat(context, 1)
        context2aspect = torch.cat(aspect, 1)

        # Maxpool
        final_context = torch.max_pool1d(
            aspect2context.squeeze(3),
            kernel_size=aspect2context.shape[2]).squeeze(2)
        final_aspect = torch.max_pool1d(
            context2aspect.squeeze(3),
            kernel_size=context2aspect.shape[2]).squeeze(2)

        # FeatureVec
        final = torch.cat((final_context, final_aspect), dim=-1)
        final = self.dropout(final)
        out = self.dense(final)
        return out
Code example #6
    def forward(self, inputSequence: torch.Tensor, token: Optional[torch.Tensor] = None,
                attentionMask: Optional[torch.Tensor] = None) \
            -> torch.Tensor:
        """
        Performs forward phase on the model.

        :param inputSequence: Parsed input tokens.
        :type inputSequence: torch.Tensor
        :param token: Token that is used in the first layer attention.
            If you will not pass this token than this token will be the same as first inputSequence token.
        :type token: Optional[torch.Tensor]
        :param attentionMask: Mask to avoid attention on (padding) token indices. 1 NOT  MASKED, 0 for MASKED.
        :type attentionMask: torch.Tensor
        :return: The output of convolver in form of sequence:
            BATCH x TIME x HIDDEN_SIZE
        :rtype: torch.Tensor
        """

        wordEmbeddings = self.embeddings(inputSequence)

        if token is None:
            token = self.embeddings(inputSequence[:, 0])

        transformed = self.layers[0](token, wordEmbeddings, attentionMask)
        for layer in self.layers[1:]:
            newToken = torch.max_pool1d(transformed.permute(0, 2, 1), transformed.shape[1]).squeeze(
                2)  # we take the max over time dimension for each feature
            transformed = layer(newToken, transformed, attentionMask)

        return transformed
Code example #7
    def forward(self, x):
        # x : [batch size, seq len, input dim]
        if x.size(1) < self.max_kernel_size:
            pd = [0, 0, 0, self.max_kernel_size - x.size(1)]

            # [batch size, max seq len, input dim]
            x = f.pad(x, pd, 'constant', 0)

        # x : [batch size, 1, max seq len, input dim]
        x = x.unsqueeze(1)

        # x : [batch size, kernel num, max seq len - width + 1]
        x = [torch.relu(conv(x).squeeze(-1)) for conv in self.convs]

        x = [torch.max_pool1d(x_, x_.size(-1)).squeeze(-1) for x_ in x]

        # [batch size, sum(kernel_num)]
        x = torch.cat(x, dim=-1)

        x = self.highway_layer.forward(x)

        # logit : [batch size, 1]
        # logit = torch.sigmoid(self.feedforward_layer(self.dropout_layer(x)))
        logit = self.feedforward_layer(self.dropout_layer(x))
        return logit
Code example #8
    def forward(self, x):
        """
           x : Tensor(B, L)
        """
        B = x.shape[0]
        x_len = [int((x[i] != self.padding_idx).sum()) for i in range(B)]  # plain ints for pack_padded_sequence

        x = self.embedding(x)
        #  x = self.dropout(x)
        x = pack_padded_sequence(x, x_len, batch_first=True)
        x, _ = self.LSTM(x)
        x = pad_packed_sequence(x, batch_first=True)
        x = x[0]

        y = self.vec.repeat(B, 1, 1)
        e = torch.bmm(x, y)  # (B,L,1)
        mask = torch.ones_like(e)
        for i in range(B):
            mask[i, :x_len[i]] = 0
        e.data.masked_fill_(mask.bool(), -1e30)
        e = F.softmax(e, dim=1)
        att = torch.bmm(e.transpose(1, 2), x).squeeze(1)

        x_m = x.transpose(1, 2)
        x_max = torch.max_pool1d(x_m, x_m.shape[-1]).squeeze(-1)
        x_avg = torch.avg_pool1d(x_m, x_m.shape[-1]).squeeze(-1)

        out = torch.cat((x_max, x_avg, att), 1)  #(B,6H)
        out = self.fc(out)
        return out
Code example #9
    def forward(self, x):
        lens = [len(item) for item in x]
        max_len = max(lens)

        encodes = self.encoder([tree for tree_seq in x for tree in tree_seq],
                               sum(lens))

        seq, start, end = [], 0, 0
        for i in range(self.batch_size):
            end += lens[i]
            if max_len - lens[i]:
                seq.append(self.get_zeros(max_len - lens[i]))
            seq.append(encodes[start:end])
            start = end
        encodes = torch.cat(seq)
        encodes = encodes.view(self.batch_size, max_len, -1)

        # gru
        gru_out, hidden = self.bigru(encodes, self.init_hidden())

        gru_out = torch.transpose(gru_out, 1, 2)
        # pooling
        gru_out = torch.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
        # gru_out = gru_out[:,-1]

        # linear
        return self.hidden2label(gru_out)
Code example #10
    def forward(self, sequences_batch, sen_per_length, sequences_lengths):
        batch_size = sequences_batch.size(0)
        max_sen_num = sequences_batch.size(1)
        dim = sequences_batch.size(3)
        sequences_batch = sequences_batch.view(batch_size * max_sen_num, -1,
                                               dim)
        packed_batch = nn.utils.rnn.pack_padded_sequence(sequences_batch,
                                                         sen_per_length,
                                                         batch_first=True,
                                                         enforce_sorted=False)

        outputs, _ = self._encoder(packed_batch)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs,
                                                      batch_first=True)

        outputs = torch.transpose(outputs, 1, 2)
        outputs = torch.max_pool1d(outputs, outputs.size(2))
        outputs = torch.squeeze(outputs, 2)
        sen_dim = outputs.size(1)
        outputs = outputs.view(batch_size, -1, sen_dim)

        packed_batch = nn.utils.rnn.pack_padded_sequence(outputs,
                                                         sequences_lengths,
                                                         batch_first=True,
                                                         enforce_sorted=False)
        outputs, _ = self._encoder1(packed_batch)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs,
                                                      batch_first=True)

        return outputs
Code example #11
    def forward(self, x, mask, domain_mask):
        # x : [batch size, seq len, input dim]

        mask = mask.transpose(-1, -2)
        x = x.masked_fill(mask == 0, 0)

        if x.size(1) < self.max_kernel_size:
            pd = [0, 0, 0, self.max_kernel_size - x.size(1)]

            # [batch size, max seq len, input dim]
            x = f.pad(x, pd, 'constant', 0)

        # x : [batch size, 1, max seq len, input dim]
        x = x.unsqueeze(1)

        # x : [batch size, kernel num, max seq len - width + 1]
        x = [torch.relu(conv(x).squeeze(-1)) for conv in self.convs]

        x = [torch.max_pool1d(x_, x_.size(-1)).squeeze(-1) for x_ in x]

        # [batch size, sum(kernel_num)]
        x = torch.cat(x, dim=-1)

        logits = self.feedforward_layer(self.dropout_layer(x))
        logits = torch.masked_fill(logits, domain_mask == 0, -1e9)

        return logits
Code example #12
 def forward(self, sents, aspect, aspectIndex):
     sents_len = torch.sum(sents != 0, dim=-1)
     aspect_len = torch.sum(aspect != 0, dim=-1)
     sents_embedding = self.wordEmbedding(
         sents)  #[batch_size,sequence_length,embedding_w]
     lstm_sent, (_, _) = self.bilstm1(
         sents_embedding)  #[batch_size,sequence_length,2*embedding_h]
     #context_preserving_transformation
     aspect_embedding = self.wordEmbedding(
         aspect)  #[batch_size,aspect_length,embedding_w]
     lstm_aspect, (_, _) = self.bilstm2(
         aspect_embedding)  #[batch_size,aspect_length,2*embedding_h]
     for i in range(L):  # L: number of transformation layers, assumed defined in the enclosing scope
         lstm_aspect = lstm_aspect.permute(
             0, 2, 1)  #[batch_size,2*embedding_h,aspect_length]
         F_hihj = torch.softmax(
             torch.bmm(lstm_sent, lstm_aspect),
             2)  #[batch_size,sequence_length,aspect_length]
         lstm_aspect = lstm_aspect.permute(0, 2, 1)
         ri = torch.bmm(
             F_hihj,
             lstm_aspect)  #[batch_size,sequence_length,2*embedding_h]
         hi_l = torch.relu(self.Wt(torch.cat(
             (lstm_sent, ri),
             2)))  #[batch_size,sequence_length,2*embedding_h]
         lstm_sent = hi_l + lstm_sent  #[batch_size,sequence_length,2*embedding_h]
         lstm_sent = self.position(lstm_sent, aspectIndex, aspect_len,
                                   sents_len)
     lstm_sent = lstm_sent.permute(
         0, 2, 1)  #[batch_size,2*embedding_h,sequence_length]
     lstm_sent = torch.max_pool1d(self.conv(lstm_sent),
                                  lstm_sent.shape[2]).squeeze(
                                      2)  #[batch_size,filter_num]
     pred = self.predict(lstm_sent)
     return pred
Code example #13
    def forward(self, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask):
        char_batch_size = batch_char.size(0)
        char_embeds = self.char_embeddings(batch_char)
        char_embeds = self.char_drop(char_embeds)
        char_embeds = char_embeds.transpose(1, 2)  # swap max_length and embedding_dim
        char_cnn_out = self.char_cnn(char_embeds)
        char_cnn_out = torch.max_pool1d(char_cnn_out, kernel_size=char_cnn_out.size(2)).view(char_batch_size, -1)
        char_cnn_out = char_cnn_out[batch_charrecover]  # restore the batch order from before sorting
        char_features = char_cnn_out.view(batch_word.size(0), batch_word.size(1), -1)

        feat_embs = self.feature_embeddings(batch_features)

        word_embs = self.word_embeddings(batch_word)
        word_embs = torch.cat([word_embs, char_features, feat_embs], 2)
        word_represent = self.word_drop(word_embs)

        # lstm
        packed_words = pack_padded_sequence(word_represent, batch_wordlen.cpu().numpy(), batch_first=True)
        hidden = None
        lstm_out, hidden = self.lstm(packed_words, hidden)
        lstm_out, _ = pad_packed_sequence(lstm_out)
        lstm_out = self.droplstm(lstm_out.transpose(1, 0))

        outputs = self.hidden2tag(lstm_out)

        scores, tag_seq = self.crf._viterbi_decode(outputs, mask)
        return tag_seq
Code example #14
    def calculate_loss(self, batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover,
                       batch_label, mask):
        char_batch_size = batch_char.size(0)
        char_embeds = self.char_embeddings(batch_char)  # (530, 10, 300); 10 is the max character length
        char_embeds = self.char_drop(char_embeds)  # (530, 10, 300)
        char_embeds = char_embeds.transpose(1, 2)  # swap max_length and embedding_dim: (batch*max_char_len, dim, max_length)
        char_cnn_out = self.char_cnn(char_embeds)  # (530, 50, 10)
        char_cnn_out = torch.max_pool1d(char_cnn_out, kernel_size=char_cnn_out.size(2)).view(char_batch_size, -1)  # (530, 50): pool over the character positions
        char_cnn_out = char_cnn_out[batch_charrecover]  # restore the order from before the word-length sort
        char_features = char_cnn_out.view(batch_word.size(0), batch_word.size(1), -1)  # (10, 53, 50): back to the word level

        feat_embs = self.feature_embeddings(batch_features)  # (10,53,5)

        word_embs = self.word_embeddings(batch_word)  # (10,53,300)
        word_embs = torch.cat([word_embs, char_features, feat_embs], 2)  # (10,53,355)
        word_represent = self.word_drop(word_embs)

        # lstm
        packed_words = pack_padded_sequence(word_represent, batch_wordlen.cpu().numpy(), batch_first=True)
        hidden = None
        lstm_out, hidden = self.lstm(packed_words, hidden)
        lstm_out, _ = pad_packed_sequence(lstm_out)
        lstm_out = self.droplstm(lstm_out.transpose(1, 0))

        outputs = self.hidden2tag(lstm_out)

        total_loss = self.crf.neg_log_likelihood_loss(outputs, mask, batch_label)
        scores, tag_seq = self.crf._viterbi_decode(outputs, mask)

        return total_loss, tag_seq
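
A shape sketch of the character-CNN pooling above, using the sizes from the inline comments (530 words, 50 filters, 10 character positions):

    import torch

    char_cnn_out = torch.randn(530, 50, 10)  # (words, filters, char_positions)
    pooled = torch.max_pool1d(char_cnn_out, kernel_size=char_cnn_out.size(2)).view(530, -1)
    assert pooled.shape == (530, 50)  # one feature vector per word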
Code example #15
    def forward(self, x, y):
        """
            x : Tensor(B,L1)
            y : Tensor(B,L2)
            x_len : list[int]
            y_len : list[int]
            mask : Tensor(B,L1,L2) 
        """
        B = x.shape[0]
        x, x_len = self.step(x)
        y, y_len = self.step(y)
        y_T = y.transpose(1, 2)
        e = torch.bmm(x, y_T)
        mask = torch.ones_like(e)

        for i in range(B):
            mask[i, :x_len[i], :y_len[i]] = 0

        e.data.masked_fill_(mask.bool(), float("-inf"))
        e_x = F.softmax(e, dim=1)
        e_y = F.softmax(e, dim=2)
        e_x.data.masked_fill_(mask.bool(), 0.0)
        e_y.data.masked_fill_(mask.bool(), 0.0)
        x_att = torch.bmm(e_y, y)  # (B, L1, 2H)
        y_att = torch.bmm(e_x.transpose(1, 2), x)

        x_m = torch.cat((x, x_att, x - x_att, x * x_att), 2)  #(B, L1, 8H)
        y_m = torch.cat((y, y_att, y - y_att, y * y_att), 2)
        x_m = x_m.transpose(1, 2)
        y_m = y_m.transpose(1, 2)
        x_m = self.dropout(x_m)
        y_m = self.dropout(y_m)

        x_max = torch.max_pool1d(x_m, x_m.shape[-1]).squeeze(-1)  # (B, 8H)
        x_avg = torch.avg_pool1d(x_m, x_m.shape[-1]).squeeze(-1)
        y_max = torch.max_pool1d(y_m, y_m.shape[-1]).squeeze(-1)
        y_avg = torch.avg_pool1d(y_m, y_m.shape[-1]).squeeze(-1)
        out = torch.cat((x_max, x_avg, y_max, y_avg), 1)  # (B, 32H)
        out = self.dropout(out)
        out = self.fc(out)
        return out
Code example #16
    def forward(self, y):
        y = y.transpose(1, 2)
        h1_group = self.conv1_group(y)
        h1_nogroup = self.conv1_nogroup(y)
        h1 = torch.cat([h1_group, h1_nogroup], dim=1)
        h1 = torch.relu(h1)
        h1 = torch.max_pool1d(h1, kernel_size=5, stride=5)

        h2 = self.conv2(h1)
        h2 = torch.relu(h2)
        h2 = torch.max_pool1d(h2, kernel_size=5, stride=5)

        h3 = self.conv3(h2)
        h3 = torch.relu(h3)
        h3 = torch.max_pool1d(h3, kernel_size=5, stride=5)

        return h3.transpose(1, 2), h1.transpose(1, 2)
Code example #17
    def forward(self, y):
        h1 = self.conv1(y.transpose(1, 2))
        h1 = torch.relu(h1)
        h1 = torch.max_pool1d(h1, kernel_size=5, stride=5)

        h2 = self.conv2(h1)
        h2 = torch.relu(h2)
        h2 = torch.max_pool1d(h2, kernel_size=5, stride=5)

        h3 = self.conv3(h2)
        h3 = torch.relu(h3)
        h3 = torch.max_pool1d(h3, kernel_size=5, stride=5)

        h4 = self.conv3(h3)  # note: conv3 is applied a second time here
        h4 = torch.relu(h4)
        h4 = torch.max_pool1d(h4, kernel_size=5, stride=5)

        return h4.transpose(1, 2)
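
Unlike the global max pools elsewhere on this page, kernel_size=5 with stride=5 downsamples the time axis by a factor of 5 at each stage. A quick check with an assumed input length:

    import torch

    h = torch.randn(2, 16, 625)  # (batch, channels, time)
    h = torch.max_pool1d(h, kernel_size=5, stride=5)
    assert h.shape == (2, 16, 125)  # 625 -> 125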
Code example #18
 def forward(self, x):
     '''
     x: (batch, max_word_length, embedding_size)
     return: (batch, embedding_size)
     '''
     x = x.permute(0, 2, 1)
     x = F.relu(self.conv1d(x))
     x = torch.max_pool1d(x, kernel_size=x.shape[-1]).squeeze(dim=-1)
     return x
Code example #19
 def forward(self, x):
     x = self.word_embedding(x)  # embedding layer
     x = x.permute(1, 0, 2).unsqueeze(1)  # reorder dims and add a channel dim
     x = [torch.relu(conv(x)).squeeze(3) for conv in self.convs]  # convolution layers
     x = [torch.max_pool1d(h, h.size(2)).squeeze(2) for h in x]  # max-over-time pooling
     x = torch.cat(x, 1)
     x = self.dropout(x)  # dropout against overfitting
     logits = self.fc(x)  # project to logits
     return logits
Code example #20
 def max_pool_on_seq(tensor):
     """
     一般tensor为三维矩阵,pool的层级一般是seq_len层级
     :param tensor:
     :return:
     """
     max_p = torch.max_pool1d(tensor.transpose(1, 2),
                              tensor.size(1)).squeeze(-1)
     return max_p
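
A usage sketch for the helper above (shapes are assumptions for illustration):

    import torch

    batch = torch.randn(4, 12, 64)  # (batch, seq_len, hidden)
    pooled = max_pool_on_seq(batch)
    assert pooled.shape == (4, 64)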
Code example #21
File: model.py  Project: leoTOT/datamining
 def forward(self, x):
     x = self.word_embedding(x)
     x = x.permute(1, 0, 2).unsqueeze(1)
     x = [torch.relu(conv(x)).squeeze(3) for conv in self.convs]
     x = [torch.max_pool1d(h, h.size(2)).squeeze(2) for h in x]
     x = torch.cat(x, 1)
     x = self.dropout(x)
     logits = self.fc(x)
     return logits
Code example #22
    def forward(self, x):
        x_6 = self.normalize_6(x[:, :, :6]).transpose(1, 2)
        x_mfcc = self.normalize_mfcc(x[:, :, 6:]).transpose(1, 2)
        x = torch.cat([x_mfcc, x_6], dim=1)

        h1 = self.conv1(x)
        h1 = torch.relu(self.ln1(h1.transpose(1, 2))).transpose(1, 2)
        h1 = torch.max_pool1d(h1, kernel_size=9, stride=1, padding=4)
        h2 = self.conv2(h1)
        h2 = torch.relu(self.ln2(h2.transpose(1, 2))).transpose(1, 2)
        h2 = torch.max_pool1d(h2, kernel_size=9, stride=1, padding=4)
        h3 = self.conv3(h2)
        h3 = torch.relu(self.ln3(h3.transpose(1, 2))).transpose(1, 2)
        h3 = torch.max_pool1d(h3, kernel_size=9, stride=1, padding=4)

        out_all = torch.cat([h1, h2, h3, x], dim=1)

        return h3.transpose(1, 2), out_all.transpose(1, 2)
Code example #23
 def forward(self, x):
     x = x.unsqueeze(1)  # (N,Ci,W,D)
     x = [torch.relu(conv(x)).squeeze(3)
          for conv in self.convs]  # len(Ks)*(N,Knum,W)
     x = [torch.max_pool1d(line, line.size(2)).squeeze(2)
          for line in x]  # len(Ks)*(N,Knum)
     x = torch.cat(x, 1)  # (N,Knum*len(Ks))
     logit = self.fc(x)
     return logit
Code example #24
File: classifier.py  Project: DJJune/train_framework
    def forward(self, x):
        ''' x shape => [batch_size, seq_len, hidden_size]
        '''
        batch_size, seq_len, hidden_size = x.size()
        x = x.transpose(1, 2)  # (batch_size, hidden_size, seq_len); view() would reinterpret memory, not transpose
        out = torch.max_pool1d(x, kernel_size=seq_len)
        out = out.squeeze(-1)  # keep the batch dim even when batch_size == 1
        out = self.fc(out)

        return out
Code example #25
    def max_pool1d(self, x, seq_lens):
        # x:[N,L,O_in]
        out = []
        for index, t in enumerate(x):
            t = t[:seq_lens[index], :]
            t = torch.t(t).unsqueeze(0)
            out.append(torch.max_pool1d(t, t.size(2)))

        out = torch.cat(out).squeeze(2)
        return out
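
The per-example slicing above keeps padded positions out of the max. A batched alternative with the same effect, sketched under the assumption that all lengths are positive, masks padding to -inf before a single pool:

    import torch

    def masked_max_pool1d(x, seq_lens):
        # x: (N, L, C); seq_lens: true lengths per example
        N, L, C = x.shape
        steps = torch.arange(L).unsqueeze(0)           # (1, L)
        lens = torch.as_tensor(seq_lens).unsqueeze(1)  # (N, 1)
        pad = (steps >= lens).unsqueeze(-1)            # True at padded steps
        return x.masked_fill(pad, float("-inf")).max(dim=1).values  # (N, C)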
Code example #26
    def forward(self, x, y):
        y = y.transpose(1, 2)
        h1_group = torch.relu(self.conv1_group(y))
        h1_nogroup = torch.relu(self.conv1_nogroup(y))
        h1 = torch.cat([h1_group, h1_nogroup], dim=1)
        h1 = torch.max_pool1d(h1, kernel_size=5, stride=5)

        h2 = self.conv2(h1)
        h2 = torch.relu(h2)
        h2 = torch.max_pool1d(h2, kernel_size=5, stride=5)

        h3 = self.conv3(h2)
        h3 = torch.relu(h3)

        h1 = h1.transpose(1, 2)
        h2 = h2.transpose(1, 2)
        h3 = h3.transpose(1, 2)

        real_fake = self.out(h3)

        return real_fake, h1
Code example #27
    def forward(self, x, y):
        _, _, L3_feature, _ = self.ACM_Freeze(x, y)
        _, _, Dr_feature, _ = self.ACM_unFreeze(x, y)
        cat_feature = torch.cat([L3_feature, Dr_feature],
                                dim=2).transpose(1, 2)

        h1 = self.conv1(cat_feature)
        h1 = torch.relu(h1)
        h1 = torch.max_pool1d(h1, kernel_size=5, stride=5)

        h2 = self.conv2(h1)
        h2 = torch.relu(h2)
        h2 = torch.max_pool1d(h2, kernel_size=5, stride=5)

        h3 = self.conv3(h2)
        h3 = torch.relu(h3)
        h3 = torch.max_pool1d(h3, kernel_size=5, stride=5)

        real_fake = self.out(h3.transpose(1, 2))

        return real_fake, cat_feature.transpose(1, 2)
Code example #28
 def forward(self, source, aspect):
     source_embedding = self.wordEmbedding(source).permute(0, 2, 1)  # [batch_size, embedding_dim, sequence_length]
     aspect_embedding = self.wordEmbedding(aspect).permute(0, 2, 1)  # [batch_size, embedding_dim, aspect_length]
     aspect_embedding_map = F.relu(self.aspect_map(aspect_embedding))  # [batch_size, filter_num, aspect_length]
     aspect_embedding_max = torch.max_pool1d(aspect_embedding_map, aspect_embedding_map.size(2))  # [batch_size, filter_num, 1]
     conv1_result = [torch.tanh(conv(source_embedding)) for conv in self.convolution1]  # torch.tanh: F.tanh is deprecated
     conv2_result = [F.relu(conv(source_embedding) + aspect_embedding_max) for conv in self.convolution2]  # aspect vector broadcasts over the sequence
     conv_result = [x * y for x, y in zip(conv1_result, conv2_result)]
     pool_result = [F.max_pool1d(i, i.shape[2]).squeeze(2) for i in conv_result]
     cat_result = torch.cat(pool_result, 1)
     result = F.softmax(self.linear(cat_result), dim=1)  # explicit dim avoids the implicit-dim warning
     return result
Code example #29
 def forward(self, x):
     x = self.word_embedding(x)  # [len(sent), batch_size, embed_dim]
     x = x.permute(1, 0, 2).unsqueeze(1)  # reorder dims and add a channel dim
     # [batch_size, 1, len(sent), embed_dim]
     x = [torch.relu(conv(x)).squeeze(3) for conv in self.convs]  # drop the trailing singleton dim
     # conv output: [batch_size, num_kernel, len(sent)-kernel+1, 1]
     # after squeeze: [batch_size, num_kernel, len(sent)-kernel+1]
     x = [torch.max_pool1d(h, h.size(2)).squeeze(2)
          for h in x]  # [batch_size, num_kernel]
     x = torch.cat(x, 1)  # concatenate pooled features from all kernel sizes
     x = self.dropout(x)
     logits = self.fc(x)
     return logits
Code example #30
 def embedd_and_forward(self, x):
     conv1d_1 = self.conv1d_1(x)
     max_pooling1d_1 = torch.max_pool1d(conv1d_1,
                                        kernel_size=(4, ),
                                        stride=(4, ),
                                        padding=0,
                                        ceil_mode=False)
     conv1d_2 = self.conv1d_2(max_pooling1d_1)
     max_pooling1d_2 = torch.max_pool1d(conv1d_2,
                                        kernel_size=(4, ),
                                        stride=(4, ),
                                        padding=0,
                                        ceil_mode=False)
     conv1d_3 = self.conv1d_3(max_pooling1d_2)
     max_pooling1d_3 = torch.max_pool1d(conv1d_3,
                                        kernel_size=(4, ),
                                        stride=(4, ),
                                        padding=0,
                                        ceil_mode=False)
     conv1d_4 = self.conv1d_4(max_pooling1d_3)
     max_pooling1d_4 = torch.max_pool1d(conv1d_4,
                                        kernel_size=(4, ),
                                        stride=(4, ),
                                        padding=0,
                                        ceil_mode=False)
     conv1d_5 = self.conv1d_5(max_pooling1d_4)
     global_max_pooling1d_1 = torch.max_pool1d(
         input=conv1d_5, kernel_size=conv1d_5.size()[2:])
     global_average_pooling1d_1 = torch.avg_pool1d(
         input=conv1d_5, kernel_size=conv1d_5.size()[2:])
     global_max_pooling1d_1_flatten = global_max_pooling1d_1.view(
         global_max_pooling1d_1.size(0), -1)
     global_average_pooling1d_1_flatten = global_average_pooling1d_1.view(
         global_average_pooling1d_1.size(0), -1)
     concatenate_1 = torch.cat((global_max_pooling1d_1_flatten,
                                global_average_pooling1d_1_flatten), 1)
     dense_1 = self.dense_1(concatenate_1)
     activation_1 = torch.sigmoid(dense_1)
     return activation_1
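
The final two pools above are global max and average pooling over the remaining time steps; a compact equivalent for reference (the tensor here stands in for conv1d_5):

    import torch

    feats = torch.randn(2, 32, 50)  # (batch, channels, time)
    gmp = torch.max_pool1d(feats, feats.size(2)).flatten(1)  # (batch, channels)
    gap = torch.avg_pool1d(feats, feats.size(2)).flatten(1)  # (batch, channels)
    combined = torch.cat((gmp, gap), dim=1)  # (batch, 2 * channels)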