Example #1
    def forward(self, x, beat):
        # print(beat.shape)
        # beat = x[:,:,-256:]
        x = self.stem_conv(x)
        x = self.layers(x)
        x = self.head_conv(x)

        
        # print(beat.shape)
        # beat = self.googlenet(beat)
        beat = self.beat_stem_conv(beat)
        beat = self.beat_layers(beat)
        beat = self.beat_head_conv(beat)
        # print(x.shape)
        # print(beat.shape)

        # print(x.shape)
        # x = self.avgpool(x)
        max_pooled = F.adaptive_max_pool1d(x, 1)
        avg_pooled = F.adaptive_avg_pool1d(x, 1)
        x = torch.cat([max_pooled, avg_pooled], dim=1)
        x = x.view(x.size(0), -1)


        beat = torch.cat([F.adaptive_max_pool1d(beat, 1), F.adaptive_avg_pool1d(beat, 1)], dim=1)
        beat = beat.view(beat.size(0), -1)

        # print(x.shape)
        # print(beat.shape)
        x = torch.cat([x, beat], dim=1)
        x = self.classifier(x)
        # x = self.dropout(x)
        return x
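A pattern that recurs throughout these examples is pooling the temporal axis two ways and concatenating the results into one fixed-size descriptor. A minimal self-contained sketch of just that step (shapes are illustrative, not taken from the model above):

import torch
import torch.nn.functional as F

x = torch.randn(2, 64, 100)                # dummy (batch, channels, time) features
max_pooled = F.adaptive_max_pool1d(x, 1)   # (2, 64, 1)
avg_pooled = F.adaptive_avg_pool1d(x, 1)   # (2, 64, 1)
pooled = torch.cat([max_pooled, avg_pooled], dim=1)  # (2, 128, 1)
flat = pooled.view(pooled.size(0), -1)     # (2, 128), ready for a classifier head
print(flat.shape)  # torch.Size([2, 128])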
Example #2
    def forward(self, text):
        embedded = self.embedding(text)
        #print("embedding",embedded.shape)
        embedded = embedded.unsqueeze(1)
        #print("embedding unsqueeze",embedded.shape)
        pooled_0 = F.adaptive_avg_pool1d(
            F.relu(self.conv_0(embedded).squeeze(3)),
            50).squeeze(2)  # convolution and pooling layers
        pooled_1 = F.adaptive_avg_pool1d(
            F.relu(self.conv_1(embedded).squeeze(3)), 50).squeeze(2)
        pooled_2 = F.adaptive_avg_pool1d(
            F.relu(self.conv_2(embedded).squeeze(3)), 50).squeeze(2)
        #print('pooled_0', pooled_0.shape)

        cat = torch.cat((pooled_0, pooled_1, pooled_2),
                        dim=2)  # concatenate results together
        #print('cat',cat.shape)
        cat = self.dropout(cat)
        #print('cat', cat.shape)
        cat = cat.transpose(1, 2).transpose(0, 1)
        #p=cat
        #p = F.tanh(p)
        # output, hidden = self.gru(p)
        #print('cat', cat.shape)
        output, (hn, cn) = self.lstm(cat)
        k = self.fc(hn.squeeze(0))
        return k
Example #3
    def forward(self, tgt, states, src, t, train):

        tgt = self.dropout(self.embedder.forward(tgt))

        seq_lens = tgt.size(1)
        dim = tgt.size(2)

        tgt += position_encoding_init(n_position=seq_lens, emb_dim=dim)

        tgt = self.convlayers.forward(tgt)

        seq_lens = tgt.size(1)

        if train:
            states = F.adaptive_avg_pool1d(states, tgt.size(2))
            w = self.attn(states=states, tgt=tgt, mask=src)
            y = self.classifier(torch.cat([tgt, w], dim=2))

            return y

        else:
            tgt = tgt[:, t - 1, :].unsqueeze(1)

            states = F.adaptive_avg_pool1d(states, tgt.size(2))

            w = self.attn(states=states, tgt=tgt, mask=src)

            y = self.classifier(torch.cat([tgt, w], dim=2)).squeeze(1)

            return F.softmax(y, dim=1)
Example #4
def _se_pool_step_export(x, context_window_tensor):
    timesteps = x.shape[-1]
    if timesteps < context_window_tensor:
        y = F.adaptive_avg_pool1d(x, 1)
    elif context_window_tensor < 0:
        y = F.adaptive_avg_pool1d(x, 1)
    else:
        y = F.avg_pool1d(x, int(context_window_tensor),
                         1)  # [B, C, T - context_window + 1]
    return y
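A hypothetical driver for the function above, assuming the context window is passed as a scalar tensor so the comparisons stay traceable for export:

import torch
import torch.nn.functional as F

x = torch.randn(4, 32, 50)  # (B, C, T)

print(_se_pool_step_export(x, torch.tensor(100)).shape)  # window > T: global avg, (4, 32, 1)
print(_se_pool_step_export(x, torch.tensor(-1)).shape)   # negative window: global avg, (4, 32, 1)
print(_se_pool_step_export(x, torch.tensor(10)).shape)   # sliding avg, (4, 32, 41)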
Example #5
    def forward(self, img, att_size=7):
        x = img  # .unsqueeze(0)  # 3x224x224

        x = self.resnet.conv1(x)  # 64x112x112
        x = self.resnet.bn1(x)  # 64x112x112
        x = self.resnet.relu(x)  # 64x112x112
        x = self.resnet.maxpool(x)  # 64x56x56

        visual = []
        x = self.resnet.layer1(x)  # 256x56x56
        visual.append(self._avg_pooling(x))
        x = self.resnet.layer2(x)  # 512x28x28
        visual.append(self._avg_pooling(x))
        x = self.resnet.layer3(x)  # 1024x14x14
        visual.append(self._avg_pooling(x))
        x = self.resnet.layer4(x)  # 2048x7x7
        visual.append(self._avg_pooling(x))

        # BASIC
        # fc = x.mean(3).mean(2)  # .squeeze()
        # # att = F.adaptive_avg_pool2d(x,[att_size,att_size]).permute(0, 2, 3, 1)
        # att = x.permute(0, 2, 3, 1)
        ###

        ### AoANet - Method - 512 - 1x1conv
        # fc = x.mean(3).mean(2)  # .squeeze()
        # att = self.last_conv(x)
        # att = att.permute(0, 2, 3, 1)
        # att = att.view(att.size(0), -1, att.size(-1))  # TODO add this line for AoANet code
        ###

        ### AoANet - Method - 512 - maxpooling
        fc = x.mean(3).mean(2)  # .squeeze()  # 2048x7x7
        # att = x.permute(0, 2, 3, 1)  # 7x7x2048
        # att = att.view(att.size(0), -1, att.size(-1))  # 49x2048  # TODO add this line for AoANet code
        # att = F.adaptive_avg_pool1d(att, 512)  # 49x512
        # att = F.adaptive_max_pool1d(att, 512)
        ###

        att = torch.cat(visual, -1)
        att = att.view(att.size(0), -1, att.size(-1))
        att = F.adaptive_avg_pool1d(att, 512)

        x = x.permute(0, 2, 3, 1)  # 7x7x2048
        x = x.view(x.size(0), -1,
                   x.size(-1))  # 49x2048  # TODO add this line for AoANet code
        x = F.adaptive_avg_pool1d(x, 512)  # 49x512
        att += x

        return fc, att  # (batch, 2048), (batch, 49, 512)
Example #6
    def forward(self, x, *args):
        B, S, C, H, W = x.size()
        x = x.view(B * S, C, H, W)
        f = self.featuremaps(x)
        _, c, h, w = f.shape

        v_g = self.parts_avgpool(f).view(B, S, c, self.parts)
        t_a = F.normalize(v_g.norm(p=2, dim=2, keepdim=True), p=1, dim=1)
        h_index = t_a.argmax(dim=1, keepdim=True)
        f_1 = v_g.gather(dim=1, index=h_index.expand((B, 1, c, self.parts))).view(B, c, self.parts)
        f_2 = v_g.mul(t_a).sum(dim=1)
        f_fuse = torch.cat([f_1, f_2], dim=1)

        f_g = F.adaptive_avg_pool1d(f_fuse, 1).view(B, -1)
        f_t = self.fc1(f_g)

        if not self.training:
            return f_t

        y = self.classifier(f_t)

        if self.loss == {'xent'}:
            return y
        elif self.loss == {'xent', 'htri'}:
            return y, f_t
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
Example #7
    def forward(self, x):
        batch_size = x.size(0)
        x = get_graph_feature(x, k=self.k)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]
        pointfeat = x1

        x = get_graph_feature(x1, k=self.k)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x2, k=self.k)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x3, k=self.k)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]

        x = torch.cat((x1, x2, x3, x4), dim=1)

        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = x.view(-1, 1024)
        if self.global_feat:
            return x
        else:
            x = x.view(-1, 1024, 1).repeat(1, 1, self.num_points)
            return torch.cat([x, pointfeat], 1)
Example #8
    def cnn_forward(self, image, heatmap):
        if self.modality == 'rgb':
            sample_len = 3
        else:
            raise ValueError('unsupported modality: {}'.format(self.modality))
        N, C, H, W = image.size()
        T = C // sample_len
        image = image.view((-1, sample_len) + image.size()[-2:])
        conv_out = self.cnn_model.base_model.get_conv_out(image)
        # use heatmap
        _f_list = []
        N, C, K, K = conv_out.size()  # assumes a square feature map
        heatmap = F.interpolate(heatmap, size=(K, K), mode='bilinear',
                                align_corners=False).contiguous()
        for i in range(heatmap.size(1)):
            _f = conv_out * heatmap[:, i, :, :].unsqueeze(1)
            _f = F.adaptive_avg_pool2d(_f, [1, 1]).squeeze()
            _f_list.append(_f)
        f = torch.stack(_f_list, 2)
        # NxT C J
        f = F.adaptive_avg_pool1d(f, 1)
        # don't use heatmap
        # f = F.adaptive_avg_pool2d(conv_out,[1,1])

        # NxT C
        f = f.view(N, T, -1)
        # N T C
        return f
Example #9
    def forward(self, x):
        """Forward pass of the DGCNN model.

        Args:
            x (torch.Tensor): input to the network in the format: [B, N_feat, N_points]
        """
        batch_size = x.size(0)
        x_list = []

        # convolutional layers
        for it in range(len(self.conv_dims) - 1):
            x = self.get_graph_feature(x, k=self.k)
            x = self.__getattr__(f'conv_layers_{it}')(x)
            x = x.max(dim=-1, keepdim=False)[0]
            x_list.append(x)

        # embedding layer
        x = self.embedding_layer(torch.cat(x_list, dim=1))

        # prepare for FC layer input
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        # fully connected layers
        for it in range(len(self.fc_dims) - 1):
            x = self.__getattr__(f'fc_layers_{it}')(x)

        # final layer
        x = self.final_layer(x)

        return x
Example #10
    def forward(self, x):
        batch_size = x.size(0)
        x = x.transpose(1, 2)                   # x.shape = (1, 3, 1024)
        x = get_graph_feature(x, k=self.k)      # x.shape = (1, 6, 1024, 20)
        x = self.conv1(x)                       # x.shape = (1, 64, 1024, 20)
        x1 = x.max(dim=-1, keepdim=False)[0]    # x1.shape = (1, 64, 1024)

        x = get_graph_feature(x1, k=self.k)     # x.shape = (1, 128, 1024, 20)
        x = self.conv2(x)                       # x.shape = (1, 64, 1024, 20)
        x2 = x.max(dim=-1, keepdim=False)[0]    # x2.shape = (1, 64, 1024)

        x = get_graph_feature(x2, k=self.k)     # x.shape = (1, 128, 1024, 20)
        x = self.conv3(x)                       # x.shape = (1, 128, 1024, 20)
        x3 = x.max(dim=-1, keepdim=False)[0]    # x3.shape = (1, 128, 1024)

        x = get_graph_feature(x3, k=self.k)     # x.shape = (1, 256, 1024, 20)
        x = self.conv4(x)                       # x.shape = (1, 256, 1024, 20)
        x4 = x.max(dim=-1, keepdim=False)[0]    # x4.shape = (1, 256, 1024)

        x = torch.cat((x1, x2, x3, x4), dim=1)  # x.shape = (1, 512, 1024)

        x = self.conv5(x)                       # x.shape = (1, 1024, 1024)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)   # x1.shape = (1, 1024)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)   # x2.shape = (1, 1024)
        x = torch.cat((x1, x2), 1)                              # x.shape = (1, 2048)

        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)     # x.shape = (1, 512)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)     # x.shape = (1, 256)
        x = self.dp2(x)
        x = self.linear3(x)                                                 # x.shape = (1, 40)
        return x
Example #11

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        # x = self.block10(x)
        # x = self.block11(x)
        x = self.block12(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)
        # print(x.size())
        x = F.adaptive_avg_pool1d(x, 1)
        global_pooled = x.view(x.size(0), -1)
        output = self.fc(global_pooled)

        return global_pooled, torch.nn.functional.log_softmax(output, dim=1)
Example #12
    def forward(self, seq, lengths):
        #print(seq.size())
        self.h = self.init_hidden(seq.size(1))
        embs = self.emb(seq)
        #print(embs.size())
        embs = pack_padded_sequence(embs, lengths)
        gru_out, self.h = self.gru(embs, self.h)
        gru_out, lengths = pad_packed_sequence(gru_out)
        #print(gru_out.size())
        avg_pool = F.adaptive_avg_pool1d(gru_out.permute(1, 2, 0),
                                         1).view(seq.size(1), -1)
        #print('Adaptive avg pooling', avg_pool.size())

        # adaptive avg pooling by hand
        # taking the sum along the batch axis and dividing by the corresponding lengths to get the actual mean
        #avg_pool_byhand = torch.sum(gru_out, dim=0) / Variable(torch.FloatTensor(lengths).view(-1, 1))
        #print('By hand Adaptive avg pooling', avg_pool_byhand)

        max_pool = F.adaptive_max_pool1d(gru_out.permute(1, 2, 0),
                                         1).view(seq.size(1), -1)
        #print('Adaptive max pooling', max_pool.size())

        # adaptive max pooling by hand
        # collect all the non padded elements of the batch and then take max of them
        # max_pool_byhand = torch.cat([torch.max(i[:l], dim=0)[0].view(1, -1) for i, l in zip(gru_out.permute(1, 0, 2), lengths)], dim=0)
        #print('By hand Adaptive max pooling', max_pool_byhand)

        # outp = self.out(torch.cat([self.h[-1],avg_pool,max_pool],dim=1))
        outp = self.out(torch.cat([gru_out[-1], avg_pool, max_pool], dim=1))
        return F.log_softmax(outp, dim=-1)
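The "by hand" pooling sketched in the comments above can be made concrete; a toy version (shapes invented here) of length-aware average and max pooling over a padded, time-first batch:

import torch

gru_out = torch.randn(5, 2, 3)  # (seq_len, batch, hidden)
lengths = [5, 3]
gru_out[3:, 1] = 0.0            # zero out the pad positions of the shorter sequence

# average by hand: sum over time, divide by the true lengths
avg_byhand = gru_out.sum(dim=0) / torch.tensor(lengths, dtype=torch.float).view(-1, 1)

# max by hand: take the max over the non-padded steps only
max_byhand = torch.cat(
    [seq[:l].max(dim=0)[0].view(1, -1)
     for seq, l in zip(gru_out.permute(1, 0, 2), lengths)],
    dim=0)
print(avg_byhand.shape, max_byhand.shape)  # torch.Size([2, 3]) torch.Size([2, 3])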
Example #13
    def forward(self, x):
        x = torch.transpose(x, 1, 2)
        batch_size = x.size(0)
        x = self.get_graph_feature(x)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]

        x = self.get_graph_feature(x1)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]

        x = self.get_graph_feature(x2)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]

        x = self.get_graph_feature(x3)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]

        x = torch.cat((x1, x2, x3, x4), dim=1)

        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)  # [B x 2 * code_size]

        x = F.leaky_relu(self.bn6(self.linear1(x)),
                         negative_slope=0.2)  # [B x code_size]
        return x
Example #14
    def _forward(self, *inputs):
        if len(inputs) == 2 and self.use_ext:
            x, ext = inputs
        else:
            x, = inputs

        x = self.stem(x)
        x = self.block_a(x)
        x = self.block_b(x)
        x = self.block_c(x)
        x = self.block_d(x)

        # """
        #     Bert pooling
        # """
        # # N,C,D -> N,D,C
        # x = x.transpose(1, 2)

        # bert_outputs, _ = self.bert_pool(x)
        # x = bert_outputs[:, 0, :]

        x = F.adaptive_avg_pool1d(x, 1).flatten(1)

        if self.use_ext:
            # embedding of external data
            ext_emb = self.embedding(ext)

            x = torch.cat((x, ext_emb), dim=1)

        x = self.classifier(x)

        return x
Example #15
    def forward(self, x):
        B, D, N = x.shape
        map2d = x.new_zeros(B, D, N, N)
        map2d[:, :, range(N), range(N)] = x
        for (i, j) in self.maskij:
            map2d[:, :, i, j] = F.adaptive_avg_pool1d(x, len(i))
        return map2d
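For context, the loop above scatters average-pooled clip features onto a 2D (start, end) map; a toy run with a hypothetical maskij holding a single off-diagonal (the real layout comes from the model's mask configuration):

import torch
import torch.nn.functional as F

B, D, N = 1, 4, 8
x = torch.randn(B, D, N)
map2d = x.new_zeros(B, D, N, N)
map2d[:, :, range(N), range(N)] = x  # diagonal: unit-length spans

maskij = [(range(0, N - 1), range(1, N))]  # hypothetical: spans of length 2
for (i, j) in maskij:
    map2d[:, :, i, j] = F.adaptive_avg_pool1d(x, len(i))
print(map2d.shape)  # torch.Size([1, 4, 8, 8])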
Example #16
    def forward(self, data, structure):
        out = self.preprocess(data, data, structure)
        for block in self.blocks:
            out = block(out, data, structure)
        out = self.postprocess(out, data, structure)
        out = out.reshape(data.size(0), -1, self.width)
        return func.adaptive_avg_pool1d(out, 1)
Example #17
    def forward(self, x):
        bsize, num_feat, num_pts = x.size()

        # x0 = get_graph_feature(x, k=self.k)     # (bsize, 3, num_points) -> (bsize, 3*2, num_points, k)
        # t = self.transform_net(x0)              # (bsize, 3, 3)
        # x = x.transpose(2, 1)                   # (bsize, 3, num_points) -> (bsize, num_points, 3)
        # x = torch.bmm(x, t)                     # (bsize, num_points, 3) * (bsize, 3, 3) -> (bsize, num_points, 3)
        # x = x.transpose(2, 1)

        neigh_indexs, permatrix = self.permatrix_lsa(x)

        feature = F.gelu(self.conv1(x, neigh_indexs, permatrix))
        x1 = feature.clone()

        feature = F.gelu(self.conv2(feature, neigh_indexs, permatrix))
        x2 = feature.clone()

        feature = F.gelu(self.conv3(feature, neigh_indexs, permatrix))
        x3 = feature.clone()

        feature = F.gelu(self.conv4(feature, neigh_indexs, permatrix))
        x4 = feature.clone()

        x = torch.cat((x1, x2, x3, x4), dim=1)
        x = F.gelu(self.conv5(x))
        x1 = F.adaptive_max_pool1d(x, 1).view(bsize, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(bsize, -1)
        x = torch.cat((x1, x2), 1)

        x = F.gelu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.gelu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        x = self.linear3(x)
        return x
Example #18

    def forward(self, x):
        x = self.conv(x)
        x = self.bn0(x)
        x = self.relu(x)

        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            splited = torch.split(x, int(rchannel // self.radix), dim=1)
            gap = sum(splited)
        else:
            gap = x
        gap = F.adaptive_avg_pool1d(gap, 1)
        gap = self.fc1(gap)
        gap = self.bn1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1)

        if self.radix > 1:
            attens = torch.split(atten, int(rchannel // self.radix), dim=1)
            outs = []
            for att, split in zip(attens, splited):
                outs.append(att * split)
            out = sum(outs)
        else:
            out = atten * x

        return out.contiguous()
Example #19

    def forward(self, transpose_xyz):
        x = transpose_xyz
        batch_size = x.size(0)
        num_points = x.size(2)
        x = self._get_graph_feature(x, self.k)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]
        x = self._get_graph_feature(x1, self.k)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]
        x = self._get_graph_feature(x2, self.k)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]
        x = self._get_graph_feature(x3, self.k)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]
        x = torch.cat((x1, x2, x3, x4), dim=1)
        local_concat = x
        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)
        global_vector = x
        repeat_glb_feat = global_vector.unsqueeze(-1).expand(batch_size, global_vector.shape[1], num_points)
        x = torch.cat((local_concat, repeat_glb_feat), 1)
        embedding_feat = self.mlp(x)
        return embedding_feat, global_vector.unsqueeze(-1)
Example #20

    def forward(self, inputs, sorted_length):
        embeddings = self.embedding(inputs)  #64,150,300
        embeddings = embeddings.float()
        embeddings_pad = torch.nn.utils.rnn.pack_padded_sequence(
            embeddings, sorted_length, batch_first=True)
        states, hidden = self.encoder(embeddings_pad)
        states, unpacked_len = torch.nn.utils.rnn.pad_packed_sequence(
            states, batch_first=True, padding_value=0)
        hidden = hidden[-1].permute(1, 2, 0).contiguous().view(
            embeddings.size(0), -1)

        att = self.attn(states.permute(1, 0, 2)).squeeze(-1)
        att = F.softmax(att, dim=-1)
        r_att = torch.sum(att.unsqueeze(-1) * states.permute(1, 0, 2), dim=1)
        #print(states.shape)#150 32 200
        #states.permute(1,2,0) 32 200 150

        avg_pool = F.adaptive_avg_pool1d(states.permute(0, 2, 1),
                                         1).view(embeddings.size(0),
                                                 -1)  #32 200 150
        max_pool = F.adaptive_max_pool1d(states.permute(0, 2, 1),
                                         1).view(embeddings.size(0), -1)

        out = torch.cat([hidden, max_pool, avg_pool], dim=1)
        #encoding = torch.cat([states[0], states[-1]], dim=1)  # concatenate with max pooling, or collapse everything into one?
        outputs = self.decoder(out)
        #outputs=F.softmax(outputs, dim=-1)
        return outputs
Example #21
    def forward(self, img, att_size=7):
        x = img  # .unsqueeze(0)  # 3x224x224

        layer1 = self.layer1(x)  # 128,112,112
        layer2 = self.layer2(layer1)  # 256,56,56
        layer3 = self.layer3(layer2)  # 512,28,28
        layer4 = self.layer4(layer3)  # 512x7x7

        fc = layer4.mean(3).mean(2)  # 512

        low = torch.cat((layer1,
                         F.interpolate(layer2,
                                       size=layer1.size()[2:],
                                       mode='bilinear',
                                       align_corners=True)), 1)  # 384,112,112
        low = F.adaptive_avg_pool2d(low, [28, 28])  # 384,28,28,

        high = torch.cat((layer3,
                          F.interpolate(layer4,
                                        size=layer3.size()[2:],
                                        mode='bilinear',
                                        align_corners=True)), 1)  # 1024,28,28
        mixed = torch.cat((low, high), 1)  # 1408,28,28
        att = self._avg_pooling(mixed)  # 7,7,1408

        att = torch.cat((att, layer4.permute(0, 2, 3, 1)), -1)  # 7,7,1920
        att = att.view(att.size(0), -1, att.size(-1))  # 49,1920
        att = F.adaptive_avg_pool1d(att, 512)  # 49,512
        layer4 = layer4.permute(0, 2, 3, 1)
        layer4 = layer4.view(layer4.size(0), -1, layer4.size(-1))
        att += layer4

        return fc, att  # (batch, 512), (batch, 7x7, 512)
Example #22
    def forward(self, x):
        res = x
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x + res)
        x = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        return self.fc(x)
Example #23
    def forward(self, x):
        batch_size = x.size(0)
        x = get_graph_feature(x, k=self.k)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x1, k=self.k)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x2, k=self.k)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x3, k=self.k)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]

        x = torch.cat((x1, x2, x3, x4), dim=1)

        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x)
        return x
Example #24
    def forward(self, x):
        y = F.adaptive_avg_pool1d(x, 1).view(x.size(0), -1)
        y = self.sig(self.fc(y)).view(x.size(0), x.size(1), -1)

        if self.do_mul: x = x * y
        if self.do_add: x = x + y
        return x
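The block above is a squeeze-and-excitation style gate; a minimal standalone version (channel count and reduction factor are illustrative, and the do_add branch is dropped for brevity):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SEGate1d(nn.Module):
    # Minimal channel-gating module for (B, C, T) features.
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels))
        self.sig = nn.Sigmoid()

    def forward(self, x):
        y = F.adaptive_avg_pool1d(x, 1).view(x.size(0), -1)      # squeeze: (B, C)
        y = self.sig(self.fc(y)).view(x.size(0), x.size(1), -1)  # gate: (B, C, 1)
        return x * y                                             # excite: rescale channels

x = torch.randn(2, 16, 30)
print(SEGate1d(16)(x).shape)  # torch.Size([2, 16, 30])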
Example #25
    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if self.dropblock_prob > 0.0:
            x = self.dropblock(x)
        x = self.relu(x)

        batch, channel = x.shape[:2]
        if self.radix > 1:
            splited = torch.split(x, channel // self.radix, dim=1)
            gap = sum(splited)
        else:
            gap = x
        gap = F.adaptive_avg_pool1d(gap, 1)
        gap = self.fc1(gap)

        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)

        atten = self.fc2(gap).view((batch, self.radix, self.channels))
        if self.radix > 1:
            atten = F.softmax(atten, dim=1).view(batch, -1, 1)
        else:
            atten = torch.sigmoid(atten).view(batch, -1, 1)

        if self.radix > 1:
            atten = torch.split(atten, channel // self.radix, dim=1)
            out = sum([att * split for (att, split) in zip(atten, splited)])
        else:
            out = atten * x
        return out.contiguous()
Example #26

    def forward(self, input_var, input_len):
        embeded = self.embed(input_var)
        embeded = self.dropout1(embeded)
        total_length = embeded.size(1)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embeded,
                                                         input_len,
                                                         batch_first=True)
        self.gru.flatten_parameters()
        outputs, hidden = self.gru(packed)
        # print(hidden)
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True, total_length=total_length)

        # method-1: as it is a classification problem, we just grab the last hidden state -0.85
        # outputs = F.relu(self.dropout2( torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1) ))

        # method-2: cat last 2 hidden state , avg-pooling and max-pooling - 0.86
        avgpool = F.adaptive_avg_pool1d(outputs.permute(0, 2, 1), 1).squeeze(2)
        maxpool = F.adaptive_max_pool1d(outputs.permute(0, 2, 1), 1).squeeze(2)
        outputs = F.relu(
            self.dropout2(
                torch.cat(
                    (hidden[-2, :, :], hidden[-1, :, :], avgpool, maxpool),
                    dim=1)))

        outputs = self.fc(outputs)
        return outputs
Example #27
    def forward(self, x):
        batch_size = x.size(0)
        x = get_graph_feature(x, k=self.k, use_cuda=self.use_cuda)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x1, k=self.k, use_cuda=self.use_cuda)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x2, k=self.k, use_cuda=self.use_cuda)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]
        x = get_graph_feature(x3, k=self.k, use_cuda=self.use_cuda)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]
        local_x = torch.cat((x1, x2, x3, x4), dim=1) # batch_size x channels x num_pts

        x = self.conv5(local_x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        global_x = torch.cat((x1, x2), 1)   # batch_size x 2*channels

        global_x = global_x.view(global_x.shape[0], global_x.shape[1], 1).repeat(1, 1, local_x.shape[2])
        x = torch.cat([local_x, global_x], dim=1)
        
        x = self.linear1(x.permute(0, 2, 1)).permute(0, 2, 1)
        x = F.leaky_relu(self.bn6(x), negative_slope=0.2)
        x = self.dp1(x)
        x = self.linear2(x.permute(0, 2, 1)).permute(0, 2, 1)
        x = F.leaky_relu(self.bn7(x), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x.permute(0, 2, 1))
        return x
Example #28
    def forward(self, x):
        # First run the input through the attention network.
        # NOTE: constructing GraphAttentionLayer inside forward() re-initializes
        # its weights on every call; it would normally be built in __init__.
        GAT = GraphAttentionLayer(in_features=5, out_features=5, dropout=0.5)
        x = GAT(x)
        batch_size = x.size(0)
        # x = get_graph_feature(x, k=self.k)
        x = self.conv1(x)
        x1 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x1, k=self.k)
        x = self.conv2(x)
        x2 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x2, k=self.k)
        x = self.conv3(x)
        x3 = x.max(dim=-1, keepdim=False)[0]

        x = get_graph_feature(x3, k=self.k)
        x = self.conv4(x)
        x4 = x.max(dim=-1, keepdim=False)[0]

        x = torch.cat((x1, x2, x3, x4), dim=1)

        x = self.conv5(x)
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
        x = self.dp1(x)
        x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
        x = self.dp2(x)
        x = self.linear3(x)
        return x
Example #29
    def forward(self, x):
        batch_size = x.size(0)

        spirals_index = knn(x, self.k)
        x = F.gelu(self.conv1(x))
        x1 = x.clone()

        x = F.gelu(self.conv2(x))
        x2 = x.clone()

        x = F.gelu(self.conv3(x))
        x3 = x.clone()

        x = F.gelu(self.conv4(x))
        x4 = x.clone()

        x = torch.cat((x1, x2, x3, x4), dim=1)
        x = F.gelu(self.conv5(x))
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        x = F.gelu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.gelu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        x = self.linear3(x)
        return x
Example #30
    def forward(self, x):
        batch_size = x.size(0)

        # x0 = get_graph_feature(x, k=self.k)     # (batch_size, 3, num_points) -> (batch_size, 3*2, num_points, k)
        # t = self.transform_net(x0)              # (batch_size, 3, 3)
        # x = x.transpose(2, 1)                   # (batch_size, 3, num_points) -> (batch_size, num_points, 3)
        # x = torch.bmm(x, t)                     # (batch_size, num_points, 3) * (batch_size, 3, 3) -> (batch_size, num_points, 3)
        # x = x.transpose(2, 1)

        spirals_index, adjweight = self.transform(x)
        x = F.gelu(self.conv1(x, spirals_index, adjweight))
        x1 = x.clone()

        x = F.gelu(self.conv2(x, spirals_index, adjweight))
        x2 = x.clone()

        x = F.gelu(self.conv3(x, spirals_index, adjweight))
        x3 = x.clone()

        x = F.gelu(self.conv4(x, spirals_index, adjweight))
        x4 = x.clone()

        x = torch.cat((x1, x2, x3, x4), dim=1)
        x = F.gelu(self.conv5(x))
        x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
        x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1)
        x = torch.cat((x1, x2), 1)

        x = F.gelu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.gelu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        x = self.linear3(x)
        return x
Example #31
    def forward(self, inputs, state):
        x = self.embedder(inputs)
        x = x.transpose(1, 2)
        state = F.adaptive_avg_pool1d(state, x.size(2))
        x = torch.cat([x, state], 1)
        x = self.convs(x)
        x = x.transpose(1, 2)  # BxTxN
        x = x.contiguous().view(-1, x.size(2))
        x = self.classifier(x)
        x = x.view(inputs.size(0), inputs.size(1), -1)  # BxTxN
        return x
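In this last example, adaptive_avg_pool1d resamples the conditioning state to the current sequence length before concatenation; a toy illustration of that alignment step (all shapes invented):

import torch
import torch.nn.functional as F

x = torch.randn(2, 32, 17)     # (B, C, T) embedded inputs after transpose
state = torch.randn(2, 8, 50)  # conditioning state with a different length

state = F.adaptive_avg_pool1d(state, x.size(2))  # resampled to (2, 8, 17)
fused = torch.cat([x, state], dim=1)             # (2, 40, 17)
print(fused.shape)  # torch.Size([2, 40, 17])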