Example #1
    def forward(self, x):
        conv1 = self.conv_block1(x)
        conv2 = self.conv_block2(conv1)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)

        score = self.classifier(conv5)
        score_pool4 = self.score_pool4(conv4)

        score = F.upsample_bilinear(score, score_pool4.size()[2:])
        score += score_pool4
        out = F.upsample_bilinear(score, x.size()[2:])

        return out
Example #2
def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):
    batches, _, imgH, imgW = images.size()

    if mode == 'crop':
        crop_images = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_c = random.uniform(*theta) * atten_map.max()
            else:
                theta_c = theta * atten_map.max()

            crop_mask = F.upsample_bilinear(atten_map, size=(imgH, imgW)) >= theta_c
            nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])

            height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0)
            height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH)
            width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0)
            width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW)

            crop_images.append(
                F.upsample_bilinear(images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],
                                    size=(imgH, imgW)))
        crop_images = torch.cat(crop_images, dim=0)
        return crop_images

    elif mode == 'drop':
        drop_masks = []
        for batch_index in range(batches):
            atten_map = attention_map[batch_index:batch_index + 1]
            if isinstance(theta, tuple):
                theta_d = random.uniform(*theta) * atten_map.max()
            else:
                theta_d = theta * atten_map.max()

            drop_masks.append(F.upsample_bilinear(atten_map, size=(imgH, imgW)) < theta_d)
        drop_masks = torch.cat(drop_masks, dim=0)
        drop_images = images * drop_masks.float()
        return drop_images

    else:
        raise ValueError('Expected mode in [\'crop\', \'drop\'], but received unsupported augmentation method %s' % mode)
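A minimal usage sketch for batch_augment (the tensor shapes and theta values below are illustrative assumptions, not part of the original snippet):

# Hypothetical usage of batch_augment; shapes and values are assumptions for illustration.
import random  # used inside batch_augment when theta is a tuple
import torch
import torch.nn.functional as F

images = torch.rand(4, 3, 224, 224)       # batch of images (B, C, H, W)
attention_map = torch.rand(4, 1, 14, 14)  # one coarse attention map per image (B, 1, h, w)

crop_images = batch_augment(images, attention_map, mode='crop', theta=(0.4, 0.6), padding_ratio=0.1)
drop_images = batch_augment(images, attention_map, mode='drop', theta=0.5)
print(crop_images.shape, drop_images.shape)  # both keep the input shape (4, 3, 224, 224)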
Example #3
    def forward(self, x):
        """
        :param x:
        :return:
        """
        x_size = x.size()[2:]
        x_conv1 = self.conv1_bn(x)
        x_conv2 = self.conv2_dw(x_conv1)
        x_conv3 = self.conv3_dw(x_conv2)
        x_conv4 = self.conv4_dw(x_conv3)
        x_conv5 = self.conv5_dw(x_conv4)
        x_conv6 = self.conv6_dw(x_conv5)
        x_conv7 = self.conv7_dw(x_conv6)
        x_conv8 = self.conv8_dw(x_conv7)
        x_conv9 = self.conv9_dw(x_conv8)
        x_conv10 = self.conv10_dw(x_conv9)
        x_conv11 = self.conv11_dw(x_conv10)
        x_conv12 = self.conv12_dw(x_conv11)
        x_conv13 = self.conv13_dw(x_conv12)
        x = self.conv14_dw(x_conv13)

        # x = self.avg_pool(x)
        # x = x.view(-1, 1024)
        # x = self.fc(x)

        score = self.classifier(x)

        if self.module_type == '16s' or self.module_type == '8s':
            score_pool4 = self.score_pool4(x_conv12)
        if self.module_type == '8s':
            score_pool3 = self.score_pool3(x_conv6)

        if self.module_type == '16s' or self.module_type == '8s':
            score = F.upsample_bilinear(score, score_pool4.size()[2:])
            score += score_pool4
        if self.module_type == '8s':
            score = F.upsample_bilinear(score, score_pool3.size()[2:])
            score += score_pool3

        out = F.upsample_bilinear(score, x_size)

        return out
Example #4
    def forward(self, x):
        x_size = x.size()[2:]
        x_conv1 = self.conv1(x)
        x = self.bn1(x_conv1)
        x = self.relu(x)
        x = self.maxpool(x)

        x_layer1 = self.layer1(x)
        x_layer2 = self.layer2(x_layer1)
        x_layer3 = self.layer3(x_layer2)
        x = self.layer4(x_layer3)

        # x = self.avgpool(x)
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        score = self.classifier(x)

        if self.module_type == '16s' or self.module_type == '8s':
            score_pool4 = self.score_pool4(x_layer3)
        if self.module_type == '8s':
            score_pool3 = self.score_pool3(x_layer2)

        if self.module_type == '16s' or self.module_type == '8s':
            # print('score_pool4.size():', score_pool4.size())
            # print('score.size():', score.size())
            if self.upsample_method == 'upsample_bilinear':
                score = F.upsample_bilinear(score, score_pool4.size()[2:])
            elif self.upsample_method == 'ConvTranspose2d':
                score = self.upsample_1(score)
            score += score_pool4
        if self.module_type == '8s':
            # print('score_pool3.size():', score_pool3.size())
            # print('score.size():', score.size())
            if self.upsample_method == 'upsample_bilinear':
                score = F.upsample_bilinear(score, score_pool3.size()[2:])
            elif self.upsample_method == 'ConvTranspose2d':
                score = self.upsample_2(score)
            score += score_pool3

        out = F.upsample_bilinear(score, x_size)

        return out
Example #5
    def forward(self, feed_dict):

        prediction_dict = {}

        # base model
        base_feat = self.feature_extractor.first_stage_feature(
            feed_dict['img'])
        feed_dict.update({'base_feat': base_feat})

        # rpn model
        prediction_dict.update(self.rpn_model.forward(feed_dict))

        # proposals = prediction_dict['proposals_batch']
        # shape(N,num_proposals,5)
        # pre subsample for reduce consume of memory
        if self.training:
            self.pre_subsample(prediction_dict, feed_dict)
        rois_batch = prediction_dict['rois_batch']

        # note here base_feat (N,C,H,W),rois_batch (N,num_proposals,5)
        pooled_feat = self.rcnn_pooling(base_feat, rois_batch.view(-1, 5))

        # shape(N,C,1,1)
        pooled_feat = self.feature_extractor.second_stage_feature(pooled_feat)
        pooled_feat_cls = self.feature_extractor.third_stage_feature(
            pooled_feat)
        #  self.add_feat('base_feat', base_feat)

        # shape(N,C)
        #  pooled_feat_cls = F.upsample_bilinear(pooled_feat, scale_factor=2)
        pooled_feat_bbox = pooled_feat.mean(3).mean(2)

        #  pooled_feat_cls = pooled_feat_cls.mean(3).mean(2)
        rcnn_bbox_preds = self.rcnn_bbox_pred(pooled_feat_bbox)
        rcnn_cls_scores_map = self.rcnn_cls_pred(pooled_feat_cls)
        rcnn_cls_scores_map = F.upsample_bilinear(
            rcnn_cls_scores_map, scale_factor=2)

        rcnn_cls_scores = rcnn_cls_scores_map.view(-1, 2, self.pooling_size *
                                                   self.pooling_size)

        rcnn_cls_probs = F.softmax(rcnn_cls_scores, dim=1)
        rcnn_cls_probs = rcnn_cls_probs.sum(dim=-1)

        prediction_dict['rcnn_cls_probs'] = rcnn_cls_probs
        prediction_dict['rcnn_bbox_preds'] = rcnn_bbox_preds
        prediction_dict['rcnn_cls_scores'] = rcnn_cls_scores

        # used for track
        proposals_order = prediction_dict['proposals_order']
        prediction_dict['second_rpn_anchors'] = prediction_dict['anchors'][
            proposals_order]

        return prediction_dict
Example #6
    def forward(self, x):
        original_shape = (x.shape[2], x.shape[3])

        x32 = self.x32(x)
        c32 = self.c32(x32)

        x16 = self.x16(x32)
        c16 = self.c16(x16)

        x8 = self.x8(x16)
        c8 = self.c8(x8)

        c32 = F.upsample_bilinear(c32, original_shape)
        c16 = F.upsample_bilinear(c16, original_shape)
        c8 = F.upsample_bilinear(c8, original_shape)

        x = c32 + c16 + c8
        x = F.log_softmax(x, dim=1)

        return x
Example #7
    def forward(self, x):
        input_size = x.size()[2:]
        feats = self.feats[0](x)
        feats3 = self.feats[1](feats)
        feats4 = self.feats[2](feats3)
        feats5 = self.feats[3](feats4)
        feats_fconn = self.fconn(feats5)

        # feats = [feats, feats3, feats4, feats5, feats_fconn]

        score_feat3 = self.score[0](feats3)
        score_feat4 = self.score[1](feats4)
        score_fconn = self.score[2](feats_fconn)

        score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
        score += score_feat4
        score = F.upsample_bilinear(score, score_feat3.size()[2:])
        score += score_feat3

        return F.upsample_bilinear(score, input_size)
Example #8
    def forward(self, x):
        conv1 = self.conv1(x)
        denseblock1 = self.denseblock1(conv1)
        denseblock2 = self.denseblock2(denseblock1)
        denseblock3 = self.denseblock3(denseblock2)
        denseblock4 = self.denseblock4(denseblock3)

       
        x1 = conv1
        x2 = denseblock1
        x3 = F.upsample_bilinear(denseblock2, x1.size()[2:])
        x4 = F.upsample_bilinear(denseblock3, x1.size()[2:])
        x5 = F.upsample_bilinear(denseblock4, x1.size()[2:])
        xglobal = F.max_pool2d(x5, kernel_size=x5.size()[2:])
        out = torch.cat((x1, x2, x3, x4, x5,
                         xglobal.expand(x1.size()[0], 1920, x1.size()[2], x1.size()[3])), 1)
        # out = torch.cat((x1, x2, x3, x4, x5), 1)

        return out
Example #9
    def forward(self, x):

        conv1 = self.conv_block1(x)
        maskedconv = self.pixelcnn(conv1)
        conv2 = self.conv_block2(maskedconv)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)

        score = self.classifier(conv5)
        score_pool4 = self.score_pool4(conv4)
        score_pool3 = self.score_pool3(conv3)

        score = F.upsample_bilinear(score, score_pool4.size()[2:])
        score += score_pool4
        score = F.upsample_bilinear(score, score_pool3.size()[2:])
        score += score_pool3
        out = F.upsample_bilinear(score, x.size()[2:])

        return out
Example #10
    def forward(self, imgs, labels):
        n, h, w = labels.shape

        out = self.stage1(imgs)
        out = self.stage2(out)
        preds1 = self.classifier1(out)
        preds1 = F.upsample_bilinear(preds1, size=(h, w))

        out = self.stage3(out)
        preds2 = self.classifier2(out)
        preds2 = F.upsample_bilinear(preds2, size=(h, w))

        out = self.stage4(out)
        preds3 = self.classifier3(out)
        preds3 = F.upsample_bilinear(preds3, size=(h, w))

        # print(preds.shape)
        preds = preds1 + preds2 + preds3
        loss = F.nll_loss(F.log_softmax(preds, dim=1), labels)
        return preds, loss, torch.stack([preds1, preds2, preds3])
Example #11
    def forward(self, x):
        en1 = self.down1(x)
        po1 = self.pool1(en1)
        en2 = self.down2(po1)
        po2 = self.pool2(en2)
        en3 = self.down3(po2)
        po3 = self.pool3(en3)
        en4 = self.down4(po3)
        po4 = self.pool4(en4)

        c1 = self.center(po4)

        dec1 = self.up1(torch.cat([c1, F.upsample_bilinear(en4, c1.size()[2:])], 1))
        dec2 = self.up2(torch.cat([dec1, F.upsample_bilinear(en3, dec1.size()[2:])], 1))
        dec3 = self.up3(torch.cat([dec2, F.upsample_bilinear(en2, dec2.size()[2:])], 1))
        dec4 = self.up4(torch.cat([dec3, F.upsample_bilinear(en1, dec3.size()[2:])], 1))
        
        out = self.output(dec4)

        return self.final(out)
Example #12
    def forward(self, x):
        # if x: 512
        fm0 = self.layer0(x)  # 256
        fm1 = self.layer1(fm0)  # 128
        fm2 = self.layer2(fm1)  # 64
        fm3 = self.layer3(fm2)  # 32
        fm4 = self.layer4(fm3)  # 16

        gcfm1 = self.brm1(self.gcm1(fm4))  # 16
        gcfm2 = self.brm2(self.gcm2(fm3))  # 32
        gcfm3 = self.brm3(self.gcm3(fm2))  # 64
        gcfm4 = self.brm4(self.gcm4(fm1))  # 128

        fs1 = self.brm5(F.upsample_bilinear(gcfm1, fm3.size()[2:]) + gcfm2)  # 32
        fs2 = self.brm6(F.upsample_bilinear(fs1, fm2.size()[2:]) + gcfm3)  # 64
        fs3 = self.brm7(F.upsample_bilinear(fs2, fm1.size()[2:]) + gcfm4)  # 128
        fs4 = self.brm8(F.upsample_bilinear(fs3, fm0.size()[2:]))  # 256
        out = self.brm9(F.upsample_bilinear(fs4, self.input_size))  # 512

        return out
Example #13
 def forward(self, Rf, Mf):
     Mf_size = Mf.size()
     # print('Rf.shape:', Rf.shape)
     # print('Mf.shape:', Mf.shape)
     mf = self.conv_M2m(Mf)
     # print('mf.shape:', mf.shape)
     # print('Rf.shape:', Rf.shape)
     rf = torch.cat((mf[:, :, :Rf.shape[2], :Rf.shape[3]], Rf), 1)
     rf = self.conv_R2r(rf)
     out = F.upsample_bilinear(rf, Mf_size[2:])
     return out
Example #14
    def forward(self, x):
        # if x: 512
        fm0 = self.layer0(x)  # 256
        fm1 = self.layer1(fm0)  # 128
        fm2 = self.layer2(fm1)  # 64
        fm3 = self.layer3(fm2)  # 32
        fm4 = self.layer4(fm3)  # 16

        gcfm1 = self.brm1(self.gcm1(fm4))  # 16
        gcfm2 = self.brm2(self.gcm2(fm3))  # 32
        gcfm3 = self.brm3(self.gcm3(fm2))  # 64
        gcfm4 = self.brm4(self.gcm4(fm1))  # 128

        fs1 = self.brm5(F.upsample_bilinear(gcfm1, fm3.size()[2:]) + gcfm2)  # 32
        fs2 = self.brm6(F.upsample_bilinear(fs1, fm2.size()[2:]) + gcfm3)  # 64
        fs3 = self.brm7(F.upsample_bilinear(fs2, fm1.size()[2:]) + gcfm4)  # 128
        fs4 = self.brm8(F.upsample_bilinear(fs3, fm0.size()[2:]))  # 256
        out = self.brm9(F.upsample_bilinear(fs4, self.input_size))  # 512

        return out
Example #15
    def forward(self, x, low_level_feat):
        low_level_feat = self.conv1(low_level_feat)
        low_level_feat = self.bn1(low_level_feat)
        low_level_feat = self.relu(low_level_feat)

        x = F.upsample_bilinear(x, size=low_level_feat.size()[2:])
        #x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_feat), dim=1)
        x = self.last_conv(x)

        return x
Example #16
    def forward(self, x):
        x = self.conv1(x)  # -,-,320
        x = self.norm1(x)
        x = self.relu1(x)
        # print(x.size())
        x = self.conv2(x)  # -,-,160
        x = self.norm2(x)
        x = self.relu2(x)
        # print(x.size())
        x = self.conv3(x)  # -,-,80
        x = self.norm3(x)
        x = self.relu3(x)
        # print(x.size())
        x = self.conv4(x)  # -,-,40
        x = self.norm4(x)
        x = self.relu4(x)
        # print(x.size())
        x = self.conv5(x)  # -,-,20
        # print(x.size())
        # upsample
        x = F.upsample_bilinear(x, scale_factor=2)
        x = x[:, :, :-1, :-1]  # -,-, 31,31
        # print(x.size())

        x = F.upsample_bilinear(x, scale_factor=2)
        x = x[:, :, :-1, :-1]  # -,-,41,41
        # print(x.size())

        x = F.upsample_bilinear(x, scale_factor=2)
        x = x[:, :, :-1, :-1]  #-,-,81,81
        # print(x.size())

        x = F.upsample_bilinear(x, scale_factor=2)
        x = x[:, :, :-1, :-1]  #-,-,161,161
        # print(x.size())

        x = F.upsample_bilinear(x, scale_factor=2)
        x = x[:, :, :-2, :-2]  # -,-,321,321
        # print(x.size())

        return x
Example #17
 def forward(self, x):
   # Normalize x
   x = (x + 1.) / 2.0
   x = (x - self.mean) / self.std
   # Upsample if necessary
   if x.shape[2] != 299 or x.shape[3] != 299:
     if torch.__version__ in ['0.4.0']:
       x = F.upsample_bilinear(x, size=(299, 299))
     else:
       x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
   # 299 x 299 x 3
   x = self.net.Conv2d_1a_3x3(x)
   # 149 x 149 x 32
   x = self.net.Conv2d_2a_3x3(x)
   # 147 x 147 x 32
   x = self.net.Conv2d_2b_3x3(x)
   # 147 x 147 x 64
   x = F.max_pool2d(x, kernel_size=3, stride=2)
   # 73 x 73 x 64
   x = self.net.Conv2d_3b_1x1(x)
   # 73 x 73 x 80
   x = self.net.Conv2d_4a_3x3(x)
   # 71 x 71 x 192
   x = F.max_pool2d(x, kernel_size=3, stride=2)
   # 35 x 35 x 192
   x = self.net.Mixed_5b(x)
   # 35 x 35 x 256
   x = self.net.Mixed_5c(x)
   # 35 x 35 x 288
   x = self.net.Mixed_5d(x)
   # 35 x 35 x 288
   x = self.net.Mixed_6a(x)
   # 17 x 17 x 768
   x = self.net.Mixed_6b(x)
   # 17 x 17 x 768
   x = self.net.Mixed_6c(x)
   # 17 x 17 x 768
   x = self.net.Mixed_6d(x)
   # 17 x 17 x 768
   x = self.net.Mixed_6e(x)
   # 17 x 17 x 768
   # 17 x 17 x 768
   x = self.net.Mixed_7a(x)
   # 8 x 8 x 1280
   x = self.net.Mixed_7b(x)
   # 8 x 8 x 2048
   x = self.net.Mixed_7c(x)
   # 8 x 8 x 2048
   pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
   # 1 x 1 x 2048
   logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
   # 1000 (num_classes)
   return pool, logits
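The version check above exists because F.upsample_bilinear is the legacy spelling of bilinear resizing; on later PyTorch releases it forwards to F.interpolate with align_corners=True and emits a deprecation warning. A small sketch of that equivalence (assuming a PyTorch version where both spellings are still available):

# Sketch: the legacy and current spellings of bilinear resizing should match numerically.
import torch
import torch.nn.functional as F

x = torch.rand(1, 3, 128, 128)
legacy = F.upsample_bilinear(x, size=(299, 299))                                 # deprecated spelling
modern = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)  # current spelling
assert torch.allclose(legacy, modern)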
Example #18
 def forward(self, x):
     enc1 = self.enc1(x)
     enc2 = self.enc2(enc1)
     enc3 = self.enc3(enc2)
     enc4 = self.enc4(enc3)
     center = self.center(enc4)
     dec4 = self.dec4(
         torch.cat(
             [center, F.upsample_bilinear(enc4,
                                          center.size()[2:])], 1))
     dec3 = self.dec3(
         torch.cat([dec4, F.upsample_bilinear(enc3,
                                              dec4.size()[2:])], 1))
     dec2 = self.dec2(
         torch.cat([dec3, F.upsample_bilinear(enc2,
                                              dec3.size()[2:])], 1))
     dec1 = self.dec1(
         torch.cat([dec2, F.upsample_bilinear(enc1,
                                              dec2.size()[2:])], 1))
     final = self.final(dec1)
     return F.upsample_bilinear(final, x.size()[2:])
Example #19
 def forward(self, feats):
     cls_score, mask_feats = multi_apply(self.forward_single, feats,
                                         self.scales)
     mask_feats[0] = self.solo_mask3(mask_feats[0])
     mask_feats[1] = self.solo_mask4(mask_feats[1])
     mask_feats[2] = self.solo_mask5(mask_feats[2])
     mask_feats[3] = self.solo_mask6(mask_feats[3])
     mask_feats[4] = self.solo_mask7(mask_feats[4])
     for i in range(5):
         cls_score[i] = F.upsample_bilinear(
             cls_score[i], (self.grid_num[i], self.grid_num[i]))
     return cls_score, mask_feats
Example #20
    def forward(self, x):
        x_share = self.frontend(x)

        x_auxiliary = self.auxiliary_backend(x_share)
        x_auxiliary = self.auxiliary_backend_output_layer(x_auxiliary)
        x_auxiliary_output = F.upsample_bilinear(x_auxiliary, size=(128, 128))  # interpolate from 32x32 up to 128x128 to obtain the mask map

        x_trunk = x_share * x_auxiliary_output
        x_trunk_backend = self.trunk_backend(x_trunk)
        x_trunk_output = self.density_map_layer(x_trunk_backend)

        return x_auxiliary_output, x_trunk_output
Example #21
    def forward(self, *input):
        conv2_2, conv3_3, conv4_4, conv5_4 = input

        input = torch.cat([conv5_4, conv4_4], 1)
        input = self.conv1(input)
        input = self.conv2(input)
        input = F.upsample_bilinear(input, scale_factor=2)

        input = torch.cat(
            [input, conv3_3,
             F.upsample_bilinear(conv5_4, scale_factor=2)], 1)
        input = self.conv3(input)
        input = self.conv4(input)
        input = F.upsample_bilinear(input, scale_factor=2)

        input = torch.cat([input, conv2_2], 1)
        input = self.conv5(input)
        input = self.conv6(input)
        input = self.conv7(input)

        return input
Example #22
    def forward(self, input, upsampling_shape):

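        # Note: both branches below currently apply the same attention_branch_base_unit, so conv_flag has no effect here.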
        if self.conv_flag:
            self.basic_unit = self.attention_branch_base_unit(input)
        else:
            self.basic_unit = self.attention_branch_base_unit(input)

        self.base_unit = F.upsample_bilinear(self.basic_unit, size=upsampling_shape[2:])

        self.attention_weights = self.attention_mask(self.basic_unit, upsampling_shape)

        return self.attention_weights
Example #23
	def forward(self, x):
		
		#Size of input=1,num_classes,256,256	
		feats = self.feats(x) #1,128,64,64
		feat3 = self.feat3(feats)#1,256,32,32
		feat4 = self.feat4(feat3)#1,512,16,16
		feat5 = self.feat5(feat4)#1,512,8,8
		fconn = self.fconn(feat5)#1,4096,8,8
		
		score_feat3 = self.score_feat3(feat3)#1,num_classes,32,32
		score_feat4 = self.score_feat4(feat4)#1,num_classes,16,16
		score_fconn = self.score_fconn(fconn)#1,num_classes,8,8
		
		score = F.upsample_bilinear(score_fconn, score_feat4.size()[2:])
		score += score_feat4
		score = F.upsample_bilinear(score, score_feat3.size()[2:])
		score += score_feat3
				
		output = F.upsample_bilinear(score, x.size()[2:])#1,num_classes,256,256
				
		return output
Example #24
    def forward(self, x):
        space_nlb_1 = self.space_nlb_1(x)
        space_nlb_2 = self.space_nlb_2(x)
        space_nlb_3 = self.space_nlb_3(x)
        space_nlb_4 = self.space_nlb_4(x)

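        # All branches are resized to space_nlb_1's spatial size; the first call is effectively a no-op.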
        space_nlb_1 = F.upsample_bilinear(space_nlb_1,
                                          size=space_nlb_1.size()[2:])
        space_nlb_2 = F.upsample_bilinear(space_nlb_2,
                                          size=space_nlb_1.size()[2:])
        space_nlb_3 = F.upsample_bilinear(space_nlb_3,
                                          size=space_nlb_1.size()[2:])
        space_nlb_4 = F.upsample_bilinear(space_nlb_4,
                                          size=space_nlb_1.size()[2:])
        x = F.upsample_bilinear(x, size=space_nlb_1.size()[2:])

        SF = self.spatial_fuse(
            torch.cat((x, space_nlb_1, space_nlb_2, space_nlb_3, space_nlb_4),
                      1))

        return SF
Example #25
    def forward(self, x):
        conv1 = self.conv_block1(x)  ##x shape:(6,3,96,96)
        conv2 = self.conv_block2(conv1)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)

        score = self.classifier(conv5)  # score shape:(6,21,4,4)

        out = F.upsample_bilinear(score, x.size()[2:])  #out shape:(6,21,96,96)

        return out
Example #26
    def forward(self, x):
        dec1 = self.dec1(x)
        dec2 = self.dec2(dec1)
        dec3 = self.dec3(dec2)
        dec4 = self.dec4(dec3)
        center = self.center(dec4)
        enc4 = self.enc4(
            torch.cat(
                [center, F.upsample_bilinear(dec4,
                                             center.size()[2:])], 1))
        enc3 = self.enc3(
            torch.cat([enc4, F.upsample_bilinear(dec3,
                                                 enc4.size()[2:])], 1))
        enc2 = self.enc2(
            torch.cat([enc3, F.upsample_bilinear(dec2,
                                                 enc3.size()[2:])], 1))
        enc1 = self.enc1(
            torch.cat([enc2, F.upsample_bilinear(dec1,
                                                 enc2.size()[2:])], 1))

        return F.upsample_bilinear(self.final(enc1), x.size()[2:])
Example #27
    def forward(self, x):
        convert = self.convert(x)
        conv1 = self.conv_1(convert)
        b1, c1, h1, w1 = conv1.size()
        pooling1 = self.pooling_1(conv1)
        conv2 = self.conv_2(pooling1)
        b2, c2, h2, w2 = conv2.size()
        pooling2 = self.pooling_2(conv2)
        conv3 = self.conv_3(pooling2)

        conv4 = self.conv_4(
            self.cat_1(
                torch.cat([F.upsample_bilinear(conv3, size=[h2, w2]), conv2],
                          dim=1)))
        conv5 = self.conv_5(
            self.cat_2(
                torch.cat([F.upsample_bilinear(conv4, size=[h1, w1]), conv1],
                          dim=1)))
        rain_streak = self.out(conv5)

        return rain_streak
Example #28
    def forward(self, x):
        conv1 = self.conv_block1(x)
        conv2 = self.conv_block2(conv1)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)

        score = self.classifier(conv5)

        out = F.upsample_bilinear(score, x.size()[2:])

        return out
Example #29
    def forward(self, x):

        #256

        down1 = self.down1(x)
        out = F.max_pool2d(down1, kernel_size=2, stride=2)  #64

        down2 = self.down2(out)
        out = F.max_pool2d(down2, kernel_size=2, stride=2)  #32

        down3 = self.down3(out)
        out = F.max_pool2d(down3, kernel_size=2, stride=2)  #16

        down4 = self.down4(out)
        out = F.max_pool2d(down4, kernel_size=2, stride=2)  # 8

        out = self.same(out)

        out = F.upsample_bilinear(out, scale_factor=2)  #16
        out = torch.cat([down4, out], 1)
        out = self.up4(out)

        out = F.upsample_bilinear(out, scale_factor=2)  #32
        out = torch.cat([down3, out], 1)
        out = self.up3(out)

        out = F.upsample_bilinear(out, scale_factor=2)  #64
        out = torch.cat([down2, out], 1)
        out = self.up2(out)

        out = F.upsample_bilinear(out, scale_factor=2)  #128
        out = torch.cat([down1, out], 1)
        out = self.up1(out)

        out = F.upsample_bilinear(out, scale_factor=2)  #256
        out = self.up0(out)

        out = self.classify(out)

        return out
Example #30
    def forward(self, x):
        input_size = x.size()[2:]
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        if self.scale <= 8:
            score_8s = self.score_8s(x)

        x = self.block4(x)
        if self.scale <= 16:
            score_16s = self.score_16s(x)

        x = self.block5(x)
        score_32s = self.score_32s(x)

        if self.scale == 32:
            score = F.upsample_bilinear(score_32s, input_size)
        elif self.scale == 16:
            score_16s += F.upsample_bilinear(score_32s, score_16s.size()[2:])
            score = F.upsample_bilinear(score_16s, input_size)
        elif self.scale == 8:
            score_16s += F.upsample_bilinear(score_32s, score_16s.size()[2:])
            score_8s += F.upsample_bilinear(score_16s, score_8s.size()[2:])
            score = F.upsample_bilinear(score_8s, input_size)

        return score
Example #31
    def forward(self, x):
        input_size = x.size()[2:]
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)

        x = self.resnet.layer1(x)

        x = self.resnet.layer2(x)
        if self.scale <= 8:
            score_8s = self.score_8s(x)

        x = self.resnet.layer3(x)
        if self.scale <= 16:
            score_16s = self.score_16s(x)

        x = self.resnet.layer4(x)
        score_32s = self.score_32s(x)

        if self.scale == 32:
            score = F.upsample_bilinear(score_32s, input_size)
        elif self.scale == 16:
            score_16s += F.upsample_bilinear(score_32s, score_16s.size()[2:])
            score = F.upsample_bilinear(score_16s, input_size)
        elif self.scale == 8:
            score_16s += F.upsample_bilinear(score_32s, score_16s.size()[2:])
            score_8s += F.upsample_bilinear(score_16s, score_8s.size()[2:])
            score = F.upsample_bilinear(score_8s, input_size)

        return score
Example #32
    def forward(self, x):
        # ----normal forward----
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x_1 = self.layer1(x)
        x_2 = self.layer2(x_1)
        x_3 = self.layer3(x_2)
        x_4 = self.layer4(x_3)
        # ----normal forward----

        x_4 = self.gcn4(x_4)
        x_4 = self.br1_4(x_4)
        x_4_up = F.upsample_bilinear(x_4, x_3.size()[2:])

        x_3 = self.gcn3(x_3)
        x_3 = self.br1_3(x_3)
        x_3_skip = x_3 + x_4_up
        x_3_skip = self.br2_3(x_3_skip)
        x_3_up = F.upsample_bilinear(x_3_skip, x_2.size()[2:])

        x_2 = self.gcn2(x_2)
        x_2 = self.br1_2(x_2)
        x_2_skip = x_2 + x_3_up
        x_2_skip = self.br2_2(x_2_skip)
        x_2_up = F.upsample_bilinear(x_2_skip, x_1.size()[2:])

        x_1 = self.gcn1(x_1)
        x_1 = self.br1_1(x_1)
        x_1_skip = x_1 + x_2_up
        x_1_skip = self.br2_1(x_1_skip)
        x_1_up = F.upsample_bilinear(x_1_skip, scale_factor=2)

        x_out = self.br3_1(x_1_up)
        x_out = F.upsample_bilinear(x_out, scale_factor=2)
        x_out = self.br3_2(x_out)

        return x_out