Example no. 1 (score: 0)
    def __init__(self, backbone='resnet101', output_stride=16, num_classes=21,
                    bn='bn', freeze_bn=False, modal_num=3):
        """Multi-modal DeepLab: a shared backbone, one ASPP/decoder branch
        per modality, and an attention decoder that fuses the branches.

        Args:
            backbone: backbone name passed to ``build_backbone``.
            output_stride: backbone output stride ('drn' forces 8).
            num_classes: number of segmentation classes.
            bn: normalization choice: 'sync_bn', 'bn' or 'gn'.
            freeze_bn: if True, freeze batch-norm layers after construction.
            modal_num: number of input modalities (one branch each).

        Raises:
            NotImplementedError: for an unrecognized ``bn`` choice.
        """
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8
        self.best_iou = 0
        if bn == 'sync_bn':
            BatchNorm = SynchronizedBatchNorm2d
        elif bn == 'bn':
            BatchNorm = nn.BatchNorm2d
        elif bn == 'gn':
            BatchNorm = nn.GroupNorm
        else:
            raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)

        # One ASPP + decoder branch per modality, built interleaved so module
        # registration order matches the original implementation.
        self.modal_num = modal_num
        aspps, decoders = [], []
        for _ in range(modal_num):
            aspps.append(build_aspp(backbone, output_stride, BatchNorm))
            decoders.append(build_decoder(num_classes, backbone, BatchNorm))
        # Wrap in ModuleList right away: plain-list attributes would not be
        # registered as submodules (the original temporarily bound bare lists
        # to self before rebinding them).
        self.aspps = nn.ModuleList(aspps)
        self.decoders = nn.ModuleList(decoders)

        # attention-branch
        self.attention_decoder = build_attention_decoder(num_classes, modal_num, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 2 (score: 0)
File: nets.py — Project: xw-hu/FSDNet
    def __init__(self,
                 backbone='mobilenet',
                 output_stride=8,
                 num_classes=1,
                 sync_bn=True,
                 freeze_bn=False):
        """Shadow detection network: backbone + ASPP + DSC context module.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            output_stride: backbone output stride.
            num_classes: number of output channels.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
        """
        super(ShadowNet2, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)

        # 1x1 channel reductions before/after the DSC module.
        self.reduce1 = LayerConv(320, 256, 1, 1, 0, False)
        self.dsc = DSC_Module(256, 256)
        self.reduce2 = LayerConv(512, 256, 1, 1, 0, False)

        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 3 (score: 0)
    def __init__(self,
                 backbone='resnet',
                 n_in_channels=1,
                 output_stride=16,
                 num_classes=1,
                 n_bottleneck_channels=1,
                 sync_bn=True,
                 freeze_bn=False,
                 pretrained_backbone=False):
        """DeepLab variant with a bottleneck decoder plus tanh/sigmoid heads.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            n_in_channels: number of input channels fed to the backbone.
            output_stride: backbone output stride.
            num_classes: number of output channels.
            n_bottleneck_channels: bottleneck width forwarded to the decoder.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: flag stored on the instance (not applied here).
            pretrained_backbone: load pretrained backbone weights.
        """
        super(DeepLabBottleNeck, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, n_in_channels, output_stride,
                                       BatchNorm, pretrained_backbone)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm,
                                     n_bottleneck_channels)
        self.activate_tanh = nn.Tanh()
        self.activate_sigmoid = nn.Sigmoid()

        # NOTE(review): stores the flag rather than calling a freeze method
        # (sibling classes call self.freeze_bn()); presumably consumed later,
        # e.g. in train() — confirm with callers.
        self.freeze_bn = freeze_bn
Example no. 4 (score: 0)
    def __init__(self,
                 backbone='resnet101',
                 output_stride=16,
                 num_classes=21,
                 bn='bn',
                 freeze_bn=False):
        """DeepLab with a configurable normalization layer.

        ``bn`` selects the normalization ('sync_bn', 'bn' or 'gn'); any other
        value raises NotImplementedError. ``backbone == 'drn'`` forces an
        output stride of 8.
        """
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8
        self.best_iou = 0

        # Table-driven lookup in place of an if/elif chain.
        norm_layers = {
            'sync_bn': SynchronizedBatchNorm2d,
            'bn': nn.BatchNorm2d,
            'gn': nn.GroupNorm,
        }
        if bn not in norm_layers:
            raise NotImplementedError(
                'batch norm choice {} is not implemented'.format(bn))
        BatchNorm = norm_layers[bn]

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 5 (score: 0)
File: nets.py — Project: xw-hu/FSDNet
    def __init__(self,
                 backbone='mobilenet',
                 output_stride=8,
                 num_classes=1,
                 sync_bn=True,
                 freeze_bn=False):
        """Minimal segmentation net: backbone + ASPP + small conv classifier.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            output_stride: backbone output stride.
            num_classes: number of output channels.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
        """
        super(basic_ASPP, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)

        self.aspp = build_aspp(backbone, output_stride, BatchNorm)

        # Two 3x3 conv+BN+ReLU stages, then a 1x1 classifier.
        self.last_conv = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False),
            BatchNorm(256),
            nn.ReLU(),
            # nn.Dropout(0.5),  # dropout intentionally disabled here
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                      bias=False),
            BatchNorm(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, kernel_size=1, stride=1))

        if freeze_bn:
            self.freeze_bn()
Example no. 6 (score: 0)
    def __init__(self, block, NoLabels):
        """Multi-scale DeepLab variant: ResNet-101-style backbone, ASPP head,
        and branch/fuse/refine/predict convolution stacks.

        Args:
            block: residual block class forwarded to ``ResNet_ms``.
            NoLabels: number of output label channels.
        """
        super(MS_Deeplab_ms, self).__init__()
        # [3, 4, 23, 3] is the ResNet-101 stage layout; input is 3-channel RGB.
        self.Scale = ResNet_ms(block, [3, 4, 23, 3], NoLabels,
                               in_channel=3)  #changed to fix #4
        self.aspp = build_aspp(output_stride=16)

        # Side branch: 2048 -> 256 (3x3) -> 512 (1x1) projection.
        self.branch = nn.Sequential(
            nn.Conv2d(2048, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 512, kernel_size=1),
            nn.BatchNorm2d(512), nn.ReLU())

        # Projects 1024-channel fused features back to the backbone's 2048.
        self.fuse = nn.Sequential(nn.Conv2d(1024, 1024, kernel_size=1),
                                  nn.BatchNorm2d(1024), nn.ReLU(),
                                  nn.Conv2d(1024, 2048, kernel_size=1),
                                  nn.BatchNorm2d(2048), nn.ReLU())

        # Refinement over a 256+256-channel concatenation (presumably the
        # ASPP output with a skip feature — confirm in forward()).
        self.refine = nn.Sequential(
            #nn.Conv2d(48+256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.Conv2d(256 + 256,
                      256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1,
                      bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU())

        # Final classifier over a 128+64-channel concatenation.
        self.predict = nn.Sequential(
            #nn.Conv2d(48+256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.Conv2d(128 + 64,
                      128,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1,
                      bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, NoLabels, kernel_size=1))

        # ImageNet mean/std registered as buffers so they follow .to(device)
        # and state_dict, but receive no gradient updates.
        #self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406, -0.329]).view(1,4,1,1))
        #self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225, 0.051]).view(1,4,1,1))
        self.register_buffer(
            'mean',
            torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer(
            'std',
            torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        self._init_weight()
Example no. 7 (score: 0)
    def __init__(self, build_backbone_fn, output_stride=16, num_classes=21):
        """Build DeepLabV3 from a caller-supplied backbone factory.

        The factory must return a backbone exposing ``out_planes`` and
        ``low_level_out_planes``, which size the ASPP and decoder stages.
        """
        super(DeepLabV3, self).__init__()

        self.backbone = build_backbone_fn(output_stride)
        # The backbone must advertise its channel counts for the heads below.
        for required in ("out_planes", "low_level_out_planes"):
            assert hasattr(self.backbone, required)
        self.aspp = build_aspp(self.backbone.out_planes, output_stride)
        self.decoder = build_decoder(num_classes,
                                     self.backbone.low_level_out_planes)
    def __init__(self,
                 backbone='resnet18',
                 in_channels=3,
                 output_stride=16,
                 num_classes=1,
                 aux_classes=3,
                 sync_bn=True,
                 freeze_bn=False,
                 pretrained=False,
                 fusion_type='fusion',
                 is_concat=False,
                 **kwargs):
        """Pairwise DeepLab: a main segmentation branch plus a fusion head.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            in_channels: number of input channels.
            output_stride: backbone output stride.
            num_classes: classes for the main decoder.
            aux_classes: classes for the fusion head.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
            pretrained: load pretrained backbone weights.
            fusion_type: 'fusion' or 'attention_fusion'.
            is_concat: concatenation flag forwarded to the fusion builder.
            **kwargs: ignored; accepted for config-driven construction.

        Raises:
            NotImplementedError: for an unrecognized ``fusion_type``.
        """
        super(PairwiseDeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, in_channels, output_stride,
                                       BatchNorm, pretrained)

        ## branch1
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        ## fusion
        self.fusion_type = fusion_type
        if self.fusion_type == 'attention_fusion':
            print('fusion_type is attention_fusion')
            self.fusion = build_attention_fusion(aux_classes,
                                                 backbone,
                                                 BatchNorm,
                                                 is_concat=is_concat)
        elif self.fusion_type == 'fusion':
            print('init fusion_type')
            self.fusion = build_fusion(aux_classes,
                                       backbone,
                                       BatchNorm,
                                       is_concat=is_concat)
        else:
            raise NotImplementedError

        if freeze_bn:
            self.freeze_bn()
Example no. 9 (score: 0)
File: nets.py — Project: xw-hu/FSDNet
    def __init__(self,
                 backbone='mobilenet',
                 output_stride=8,
                 num_classes=1,
                 sync_bn=True,
                 freeze_bn=False):
        """Shadow detection net with auxiliary prediction/uncertainty heads.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            output_stride: backbone output stride.
            num_classes: number of output channels.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
        """
        super(ShadowNetUncertaintyGuide, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm)

        # Both auxiliary heads share the same architecture; a local factory
        # keeps the two definitions in sync instead of duplicating them.
        def _aux_head():
            return nn.Sequential(
                nn.Conv2d(320, 256, kernel_size=3, stride=1, padding=1,
                          bias=False), BatchNorm(256), nn.ReLU(),
                nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                          bias=False), BatchNorm(256), nn.ReLU(),
                nn.Conv2d(256, num_classes, kernel_size=1, stride=1))

        self.temp_predict = _aux_head()      # coarse prediction head
        self.temp_uncertainty = _aux_head()  # per-pixel uncertainty head

        self.aspp = build_aspp(backbone, output_stride, BatchNorm)

        # 1x1 channel reductions before/after the DSC module.
        self.reduce1 = LayerConv(320, 256, 1, 1, 0, False)
        self.dsc = DSC_Module(256, 256)
        self.reduce2 = LayerConv(512, 256, 1, 1, 0, False)

        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 10 (score: 0)
    def __init__(self,
                 backbone='resnet',
                 n_in_channels=1,
                 output_stride=16,
                 num_classes=1,
                 pretrained_backbone=False):
        """Plain DeepLab (backbone + ASPP + decoder) with standard BatchNorm.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            n_in_channels: number of input channels fed to the backbone.
            output_stride: backbone output stride.
            num_classes: number of output channels.
            pretrained_backbone: load pretrained backbone weights.
        """
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        BatchNorm = nn.BatchNorm2d
        self.backbone = build_backbone(backbone, n_in_channels, output_stride,
                                       BatchNorm, pretrained_backbone)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)
        # (redundant bare 'return' at the end of __init__ removed)
Example no. 11 (score: 0)
    def __init__(self,
                 backbone='resnet',
                 output_stride=16,
                 num_class=21,
                 sync_bn=True,
                 freeze_bn=False):
        """Assemble DeepLab from backbone, ASPP and decoder components."""
        super(DeepLab, self).__init__()
        # Pick the normalization layer once and thread it through the parts.
        batch_norm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, batch_norm)
        self.aspp = build_aspp(backbone, output_stride, batch_norm)
        self.decoder = build_decoder(num_class, backbone, batch_norm)
        # NOTE(review): stores the flag on the instance; if the class defines
        # a freeze_bn() method elsewhere this assignment shadows it — confirm.
        self.freeze_bn = freeze_bn
Example no. 12 (score: 0)
    def __init__(self, backbone='resnet18', in_channels=3, output_stride=8, num_classes=1,
                 sync_bn=True, freeze_bn=False, pretrained=False, **kwargs):
        """DeepLab variant used for consistency training.

        Args:
            backbone: backbone name; shallow backbones force output_stride=8.
            in_channels: number of input channels.
            output_stride: backbone output stride.
            num_classes: number of output channels.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
            pretrained: load pretrained backbone weights.
            **kwargs: ignored; accepted for config-driven construction.
        """
        super(ConsistentDeepLab, self).__init__()
        # These backbones only support an output stride of 8 here.
        if backbone in ['drn', 'resnet18', 'resnet34']:
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, in_channels, output_stride, BatchNorm, pretrained)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 13 (score: 0)
    def __init__(self, backbone='resnet', output_stride=16, num_classes=21,
                 sync_bn=True, freeze_bn=False, pretrained=False):
        """Standard DeepLab: backbone + ASPP + decoder.

        Args:
            backbone: backbone name ('drn' forces output_stride=8).
            output_stride: backbone output stride.
            num_classes: number of segmentation classes.
            sync_bn: use SynchronizedBatchNorm2d instead of nn.BatchNorm2d.
            freeze_bn: if True, freeze batch-norm layers after construction.
            pretrained: load pretrained backbone weights.
        """
        super(DeepLab, self).__init__()
        if backbone == 'drn':
            output_stride = 8

        # PEP 8: test the flag directly instead of comparing '== True'.
        BatchNorm = SynchronizedBatchNorm2d if sync_bn else nn.BatchNorm2d

        self.backbone = build_backbone(backbone, output_stride, BatchNorm,
                                       pretrained=pretrained)
        self.aspp = build_aspp(backbone, output_stride, BatchNorm)
        self.decoder = build_decoder(num_classes, backbone, BatchNorm)

        if freeze_bn:
            self.freeze_bn()
Example no. 14 (score: 0)
    def __init__(self, NoLabels, pretrained=False):
        """Siamese DeepLab: multi-scale ResNet backbone, ASPP head, and a
        stack of fuse/refine/predict convolution modules.

        Args:
            NoLabels: number of output label channels.
            pretrained: load pretrained backbone weights.
        """
        super(Siam_Deeplab, self).__init__()
        self.backbone = build_backbone('resnet_ms',
                                       in_channel=3,
                                       pretrained=pretrained)
        self.aspp = build_aspp(output_stride=16)

        # 1x1 projection of top-level backbone features: 2048 -> 1024 -> 512.
        self.conv1_1 = nn.Sequential(nn.Conv2d(2048, 1024, kernel_size=1),
                                     nn.BatchNorm2d(1024), nn.ReLU(),
                                     nn.Conv2d(1024, 512, kernel_size=1),
                                     nn.BatchNorm2d(512), nn.ReLU())

        # Side branch: 1024 -> 256 (3x3) -> 512 (1x1) projection.
        self.branch = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 512, kernel_size=1),
            nn.BatchNorm2d(512), nn.ReLU())

        # Compresses 1024-channel fused features down to 64 channels.
        self.fuse = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=1),
                                  nn.BatchNorm2d(512), nn.ReLU(),
                                  nn.Conv2d(512, 64, kernel_size=1),
                                  nn.BatchNorm2d(64), nn.ReLU())

        # Second fusion stage over a 256+128-channel concatenation.
        self.fuse2 = nn.Sequential(
            nn.Conv2d(256 + 128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256), nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256),
            nn.ReLU())

        #self.template_refine= nn.Sequential(
        #        #nn.Conv2d(48+256, 256, kernel_size=3, stride=1, padding=1, bias=False),
        #        nn.Conv2d(1024, 256, kernel_size=7, stride=2, padding=3, bias=False),
        #        nn.BatchNorm2d(256),
        #        nn.ReLU(),
        #        #nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True), # change
        #        nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias=False),
        #        nn.BatchNorm2d(128),
        #        nn.ReLU())

        # Template-branch fusion: 1024 -> 512 -> 64 channels via 3x3 convs.
        self.template_fuse = nn.Sequential(
            nn.Conv2d(1024,
                      512,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            #nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True), # change
            nn.Conv2d(512, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU())
        #nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True))

        # Refinement over a 256+256-channel concatenation (presumably the
        # ASPP output with a skip feature — confirm in forward()).
        self.refine = nn.Sequential(
            nn.Conv2d(256 + 256,
                      256,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False), nn.BatchNorm2d(256), nn.ReLU(),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(128), nn.ReLU())

        # Final classifier over a 128+64-channel concatenation.
        self.predict = nn.Sequential(
            #nn.Conv2d(48+256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.Conv2d(128 + 64,
                      128,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1,
                      bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, NoLabels, kernel_size=1))

        # ImageNet mean/std registered as buffers so they follow .to(device)
        # and state_dict, but receive no gradient updates.
        #self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406, -0.329]).view(1,4,1,1))
        #self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225, 0.051]).view(1,4,1,1))
        self.register_buffer(
            'mean',
            torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer(
            'std',
            torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        self._init_weight()