    def __init__(self,
                 nclass,
                 backbone='',
                 aux=False,
                 jpu=False,
                 pretrained_base=True,
                 M=3,
                 N=21,
                 **kwargs):
        super(CGNet, self).__init__()
        # stage 1
        self.stage1_0 = _ConvBNPReLU(3, 32, 3, 2, 1, **kwargs)
        self.stage1_1 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs)
        self.stage1_2 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs)

        # down-sampled copies of the raw input for input injection
        self.sample1 = _InputInjection(1)  # 1/2 resolution
        self.sample2 = _InputInjection(2)  # 1/4 resolution
        self.bn_prelu1 = _BNPReLU(32 + 3, **kwargs)

        # stage 2
        self.stage2_0 = ContextGuidedBlock(32 + 3,
                                           64,
                                           dilation=2,
                                           reduction=8,
                                           down=True,
                                           residual=False,
                                           **kwargs)
        self.stage2 = nn.ModuleList()
        for _ in range(M - 1):
            self.stage2.append(
                ContextGuidedBlock(64, 64, dilation=2, reduction=8, **kwargs))
        self.bn_prelu2 = _BNPReLU(128 + 3, **kwargs)

        # stage 3
        self.stage3_0 = ContextGuidedBlock(128 + 3,
                                           128,
                                           dilation=4,
                                           reduction=16,
                                           down=True,
                                           residual=False,
                                           **kwargs)
        self.stage3 = nn.ModuleList()
        for _ in range(N - 1):
            self.stage3.append(
                ContextGuidedBlock(128, 128, dilation=4, reduction=16,
                                   **kwargs))
        self.bn_prelu3 = _BNPReLU(256, **kwargs)

        self.head = nn.Sequential(nn.Dropout2d(0.1, False),
                                  nn.Conv2d(256, nclass, 1))

        # submodules trained from scratch; training scripts can use this list
        # to treat their parameters separately from a pretrained backbone
        self.__setattr__('exclusive', [
            'stage1_0', 'stage1_1', 'stage1_2', 'sample1', 'sample2',
            'bn_prelu1', 'stage2_0', 'stage2', 'bn_prelu2', 'stage3_0',
            'stage3', 'bn_prelu3', 'head'
        ])
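
A minimal usage sketch for the CGNet constructor above. Only the constructor signature comes from the snippet; the torch import, the class being importable in scope, the number of classes, and the input resolution are illustrative assumptions, and the structure of the forward output is not shown here.

import torch

# Hypothetical usage: 19 classes (e.g. Cityscapes), a single NCHW RGB image.
model = CGNet(nclass=19)
x = torch.randn(1, 3, 512, 1024)
outputs = model(x)  # forward() is not part of this snippet; output format depends on it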
Example #2
    def __init__(self,
                 nclass,
                 backbone='',
                 aux=False,
                 jpu=False,
                 pretrained_base=False,
                 **kwargs):
        super(ESPNetV2, self).__init__()
        self.pretrained = eespnet(pretrained=pretrained_base, **kwargs)
        self.proj_L4_C = _ConvBNPReLU(256, 128, 1, **kwargs)
        self.pspMod = nn.Sequential(
            EESP(256, 128, stride=1, k=4, r_lim=7, **kwargs),
            _PSPModule(128, 128, **kwargs))
        self.project_l3 = nn.Sequential(nn.Dropout2d(0.1),
                                        nn.Conv2d(128, nclass, 1, bias=False))
        self.act_l3 = _BNPReLU(nclass, **kwargs)
        self.project_l2 = _ConvBNPReLU(64 + nclass, nclass, 1, **kwargs)
        self.project_l1 = nn.Sequential(
            nn.Dropout2d(0.1), nn.Conv2d(32 + nclass, nclass, 1, bias=False))

        self.aux = aux

        # submodules trained from scratch (the eespnet backbone can be pretrained);
        # training scripts can use this list to treat their parameters separately
        self.__setattr__('exclusive', [
            'proj_L4_C', 'pspMod', 'project_l3', 'act_l3', 'project_l2',
            'project_l1'
        ])
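
A minimal usage sketch for the ESPNetV2 head above, under the same caveats: only the constructor signature is taken from the snippet; the import, class availability, nclass value and input size are assumptions.

import torch

# pretrained_base=False keeps eespnet() from loading backbone weights (see the call above).
model = ESPNetV2(nclass=19, pretrained_base=False)
x = torch.randn(1, 3, 512, 1024)
outputs = model(x)  # forward() is not shown; output format depends on it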
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 k=4,
                 r_lim=7,
                 down_method='esp',
                 norm_layer=nn.BatchNorm2d):
        super(EESP, self).__init__()
        self.stride = stride
        n = out_channels // k              # channels per parallel branch
        n1 = out_channels - (k - 1) * n    # channels left for the last branch
        assert down_method in ['avg', 'esp'], \
            'One of these is supported (avg or esp)'
        assert n == n1, \
            "n(={}) and n1(={}) should be equal for Depth-wise Convolution".format(n, n1)
        self.proj_1x1 = _ConvBNPReLU(in_channels,
                                     n,
                                     1,
                                     stride=1,
                                     groups=k,
                                     norm_layer=norm_layer)

        # effective kernel size of a dilated 3x3 conv -> required dilation rate
        map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
        # branch kernel sizes 3, 5, 7, ...; sizes above r_lim fall back to 3
        self.k_sizes = list()
        for i in range(k):
            ksize = int(3 + 2 * i)
            ksize = ksize if ksize <= r_lim else 3
            self.k_sizes.append(ksize)
        self.k_sizes.sort()
        # spatial pyramid of depth-wise dilated 3x3 convolutions, one per branch
        self.spp_dw = nn.ModuleList()
        for i in range(k):
            dilation = map_receptive_ksize[self.k_sizes[i]]
            self.spp_dw.append(
                nn.Conv2d(n, n, 3, stride, padding=dilation, dilation=dilation,
                          groups=n, bias=False))
        self.conv_1x1_exp = _ConvBN(out_channels,
                                    out_channels,
                                    1,
                                    1,
                                    groups=k,
                                    norm_layer=norm_layer)
        self.br_after_cat = _BNPReLU(out_channels, norm_layer)
        self.module_act = nn.PReLU(out_channels)
        self.downAvg = (down_method == 'avg')
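
A minimal usage sketch for the EESP block above; the import, channel sizes and input resolution are illustrative assumptions. Note that out_channels must be divisible by k (enforced by the assert) and in_channels must also be divisible by k because of the grouped 1x1 projection.

import torch

# Hypothetical EESP block: 128 -> 128 channels split across k=4 dilated branches.
block = EESP(128, 128, stride=1, k=4, r_lim=7)
x = torch.randn(1, 128, 64, 64)
y = block(x)  # forward() is not shown; with stride=1 the spatial size should be preserved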