import torch
import torch.nn as nn
import torch.nn.functional as F

# Helper blocks (_ConvBNReLU, _ConvBN, _ConvBNHswish, ASPP, _Hsigmoid, _Hswish,
# SEModule, InvertedResidual, Bottleneck) are defined elsewhere in the source repository.


class _Head(nn.Module):
    def __init__(self, nclass, in_channels, inter_channels, dataset='city', norm_layer=nn.BatchNorm2d, **kwargs):
        super(_Head, self).__init__()
        atrous_rates = (6, 12, 18)
        self.aspp = ASPP(in_channels, atrous_rates, norm_layer, **kwargs)
        self.auxlayer = _ConvBNReLU(inter_channels, 48, 1, 1)
        # 304 = 256 (ASPP output) + 48 (low-level projection)
        self.project = _ConvBNReLU(304, 256, 3, 3)
        self.reduce_conv = nn.Conv2d(256, nclass, 1, 1)
        self.quant_cat = nn.quantized.FloatFunctional()
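    # A minimal forward sketch, not part of the original snippet: assumes the
    # usual DeepLabV3+-style decoder where `x` is the high-level feature map
    # and `c1` the low-level one (256 ASPP channels + 48 = 304 into project).
    def forward(self, x, c1):
        x = self.aspp(x)
        x = F.interpolate(x, c1.shape[2:], mode='bilinear', align_corners=True)
        c1 = self.auxlayer(c1)
        # quantization-friendly concat along the channel dimension
        x = self.quant_cat.cat([x, c1], dim=1)
        x = self.project(x)
        return self.reduce_conv(x)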


class _RASPP(nn.Module):
    def __init__(self, in_channels, atrous_rates, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_RASPP, self).__init__()
        out_channels = 256
        # 1x1 branch; the three dilated branches and the pooling branch follow
        self.b0 = _ConvBNReLU(in_channels, out_channels, 1)

        rate1, rate2, rate3 = tuple(atrous_rates)
        self.b1 = _ConvBNReLU(in_channels, out_channels, 3, padding=rate1, dilation=rate1, norm_layer=norm_layer)
        self.b2 = _ConvBNReLU(in_channels, out_channels, 3, padding=rate2, dilation=rate2, norm_layer=norm_layer)
        self.b3 = _ConvBNReLU(in_channels, out_channels, 3, padding=rate3, dilation=rate3, norm_layer=norm_layer)
        self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer)

        self.project = nn.Sequential(
            _ConvBNReLU(5 * out_channels, out_channels, 1),
            nn.Dropout2d(0.1)
        )
        self.quant_cat = nn.quantized.FloatFunctional()
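    # A minimal forward sketch (assumed): run the five parallel branches,
    # concatenate with the quantization-friendly cat (5 * 256 = 1280 channels),
    # then project back to 256.
    def forward(self, x):
        feat1 = self.b0(x)
        feat2 = self.b1(x)
        feat3 = self.b2(x)
        feat4 = self.b3(x)
        feat5 = self.b4(x)
        x = self.quant_cat.cat([feat1, feat2, feat3, feat4, feat5], dim=1)
        return self.project(x)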


class _LRASPP(nn.Module):
    def __init__(self, in_channels, norm_layer, dataset, **kwargs):
        super(_LRASPP, self).__init__()
        out_channels = 256 // 2
        self.b0 = _ConvBNReLU(in_channels, out_channels, 1, 1)
        # The pooling kernel/stride are presumably tuned to the expected input
        # resolution of each dataset.
        if dataset == 'city':
            self.b1 = nn.Sequential(
                nn.AvgPool2d((37, 37), (12, 12)),
                _ConvBN(in_channels, out_channels, 1, 1, bias=False),
                _Hsigmoid()
            )
        else:
            self.b1 = nn.Sequential(
                nn.AvgPool2d((25, 25), (8, 8)),
                _ConvBN(in_channels, out_channels, 1, 1, bias=False),
                _Hsigmoid()
            )
        self.quant_mul = nn.quantized.FloatFunctional()
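    # A minimal forward sketch (assumed), following the LR-ASPP design from the
    # MobileNetV3 paper: upsample the pooled attention branch back to the
    # feature size and gate the 1x1 branch with a quantization-friendly mul.
    def forward(self, x):
        feat = self.b0(x)
        attn = self.b1(x)
        attn = F.interpolate(attn, feat.shape[2:], mode='bilinear', align_corners=True)
        return self.quant_mul.mul(feat, attn)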


class _AsppPooling(nn.Module):
    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_AsppPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(in_channels, out_channels, 1)
        )
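    # A minimal forward sketch (assumed): global-average-pool to 1x1, project,
    # then upsample back to the spatial size of the input feature map.
    def forward(self, x):
        out = self.gap(x)
        return F.interpolate(out, x.shape[2:], mode='bilinear', align_corners=True)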


class MobileNetV2(nn.Module):
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 dilated=False,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(MobileNetV2, self).__init__()

        layer1_setting = [
            # t, c, n, s
            [1, 16, 1, 1]
        ]
        layer2_setting = [[6, 24, 2, 2]]

        layer3_setting = [[6, 32, 3, 2]]
        layer4_setting = [[6, 64, 4, 2], [6, 96, 3, 1]]
        if dilated:
            layer5_setting = [[6, 160, 3, 2], [6, 320 // 2, 1, 1]]
        else:
            layer5_setting = [[6, 160, 3, 2], [6, 320, 1, 1]]
        # building first layer
        self.in_channels = int(32 * width_mult) if width_mult > 1.0 else 32
        last_channels = int(1280 * width_mult) if width_mult > 1.0 else 1280
        self.conv1 = _ConvBNReLU(3,
                                 self.in_channels,
                                 3,
                                 2,
                                 1,
                                 relu6=True,
                                 norm_layer=norm_layer)

        # building inverted residual blocks
        self.layer1 = self._make_layer(InvertedResidual,
                                       layer1_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer2 = self._make_layer(InvertedResidual,
                                       layer2_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer3 = self._make_layer(InvertedResidual,
                                       layer3_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        if dilated:
            self.layer4 = self._make_layer(InvertedResidual,
                                           layer4_setting,
                                           width_mult,
                                           dilation=2,
                                           norm_layer=norm_layer)
            self.layer5 = self._make_layer(InvertedResidual,
                                           layer5_setting,
                                           width_mult,
                                           dilation=2,
                                           norm_layer=norm_layer)
        else:
            self.layer4 = self._make_layer(InvertedResidual,
                                           layer4_setting,
                                           width_mult,
                                           norm_layer=norm_layer)
            self.layer5 = self._make_layer(InvertedResidual,
                                           layer5_setting,
                                           width_mult,
                                           norm_layer=norm_layer)

        # building last several layers
        if not dilated:
            self.classifier = nn.Sequential(
                _ConvBNReLU(self.in_channels,
                            last_channels,
                            1,
                            relu6=True,
                            norm_layer=norm_layer),
                nn.AdaptiveAvgPool2d(1),
                nn.Dropout2d(0.2),
                nn.Conv2d(last_channels, num_classes, 1))
        self.dilated = dilated
        self._init_weight()
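
    # A minimal forward sketch (assumed; the original snippet shows only
    # __init__, and _make_layer/_init_weight live elsewhere in the source):
    # chain the stem, the five inverted-residual stages, and, in
    # classification mode, the classifier head.
    def forward(self, x):
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        if not self.dilated:
            x = self.classifier(x)
            x = x.view(x.size(0), -1)  # (N, num_classes, 1, 1) -> (N, num_classes)
        return x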


class MobileNetV3(nn.Module):
    def __init__(self,
                 nclass=1000,
                 mode='large',
                 width_mult=1.0,
                 dilated=False,
                 norm_layer=nn.BatchNorm2d,
                 RE=False,
                 **kwargs):
        super(MobileNetV3, self).__init__()
        if RE:
            if mode == 'large':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, False, 'RE', 1],
                    [3, 64, 24, False, 'RE', 2],
                    [3, 72, 24, False, 'RE', 1],
                ]
                layer2_setting = [
                    [5, 72, 40, True, 'RE', 2],
                    [5, 120, 40, True, 'RE', 1],
                    [5, 120, 40, True, 'RE', 1],
                ]

                layer3_setting = [
                    [3, 240, 80, False, 'RE', 2],
                    [3, 200, 80, False, 'RE', 1],
                    [3, 184, 80, False, 'RE', 1],
                    [3, 184, 80, False, 'RE', 1],
                    [3, 480, 112, True, 'RE', 1],
                    [3, 672, 112, True, 'RE', 1],
                ]
                if dilated:  # reduce the last stage by a factor of 2
                    layer4_setting = [
                        [5, 672, 160, True, 'RE', 2],
                        [5, 960, 160, True, 'RE', 1],
                        [5, 960 // 2, 160 // 2, True, 'RE', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 672, 160, True, 'RE', 2],
                        [5, 960, 160, True, 'RE', 1],
                        [5, 960, 160, True, 'RE', 1],
                    ]

            elif mode == 'small':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, True, 'RE', 2],
                ]

                layer2_setting = [
                    [3, 72, 24, False, 'RE', 2],
                    [3, 88, 24, False, 'RE', 1],
                ]
                layer3_setting = [
                    [5, 96, 40, True, 'RE', 2],
                    [5, 240, 40, True, 'RE', 1],
                    [5, 240, 40, True, 'RE', 1],
                    [5, 120, 48, True, 'RE', 1],
                    [5, 144, 48, True, 'RE', 1],
                ]
                if dilated:
                    layer4_setting = [
                        [5, 288, 96, True, 'RE', 2],
                        [5, 576, 96, True, 'RE', 1],
                        [5, 576 // 2, 96 // 2, True, 'RE', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 288, 96, True, 'RE', 2],
                        [5, 576, 96, True, 'RE', 1],
                        [5, 576, 96, True, 'RE', 1],
                    ]
            else:
                raise ValueError('Unknown mode.')
        else:
            if mode == 'large':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, False, 'RE', 1],
                    [3, 64, 24, False, 'RE', 2],
                    [3, 72, 24, False, 'RE', 1],
                ]
                layer2_setting = [
                    [5, 72, 40, True, 'RE', 2],
                    [5, 120, 40, True, 'RE', 1],
                    [5, 120, 40, True, 'RE', 1],
                ]

                layer3_setting = [
                    [3, 240, 80, False, 'HS', 2],
                    [3, 200, 80, False, 'HS', 1],
                    [3, 184, 80, False, 'HS', 1],
                    [3, 184, 80, False, 'HS', 1],
                    [3, 480, 112, True, 'HS', 1],
                    [3, 672, 112, True, 'HS', 1],
                ]
                if dilated:  # reduce the last stage by a factor of 2
                    layer4_setting = [
                        [5, 672, 160, True, 'HS', 2],
                        [5, 960, 160, True, 'HS', 1],
                        [5, 960 // 2, 160 // 2, True, 'HS', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 672, 160, True, 'HS', 2],
                        [5, 960, 160, True, 'HS', 1],
                        [5, 960, 160, True, 'HS', 1],
                    ]

            elif mode == 'small':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, True, 'RE', 2],
                ]

                layer2_setting = [
                    [3, 72, 24, False, 'RE', 2],
                    [3, 88, 24, False, 'RE', 1],
                ]
                layer3_setting = [
                    [5, 96, 40, True, 'HS', 2],
                    [5, 240, 40, True, 'HS', 1],
                    [5, 240, 40, True, 'HS', 1],
                    [5, 120, 48, True, 'HS', 1],
                    [5, 144, 48, True, 'HS', 1],
                ]
                if dilated:
                    layer4_setting = [
                        [5, 288, 96, True, 'HS', 2],
                        [5, 576, 96, True, 'HS', 1],
                        [5, 576 // 2, 96 // 2, True, 'HS', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 288, 96, True, 'HS', 2],
                        [5, 576, 96, True, 'HS', 1],
                        [5, 576, 96, True, 'HS', 1],
                    ]
            else:
                raise ValueError('Unknown mode.')

        # building first layer
        self.in_channels = int(16 * width_mult) if width_mult > 1.0 else 16
        if RE:
            self.conv1 = _ConvBNReLU(3,
                                     self.in_channels,
                                     3,
                                     2,
                                     1,
                                     norm_layer=norm_layer)
        else:
            self.conv1 = _ConvBNHswish(3,
                                       self.in_channels,
                                       3,
                                       2,
                                       1,
                                       norm_layer=norm_layer)

        # building bottleneck blocks
        self.layer1 = self._make_layer(Bottleneck,
                                       layer1_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer2 = self._make_layer(Bottleneck,
                                       layer2_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer3 = self._make_layer(Bottleneck,
                                       layer3_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        if dilated:
            self.layer4 = self._make_layer(Bottleneck,
                                           layer4_setting,
                                           width_mult,
                                           dilation=2,
                                           norm_layer=norm_layer)
        else:
            self.layer4 = self._make_layer(Bottleneck,
                                           layer4_setting,
                                           width_mult,
                                           norm_layer=norm_layer)

        # building last several layers
        classifier = list()
        if mode == 'large':
            if dilated:
                last_bneck_channels = int(
                    960 // 2 * width_mult) if width_mult > 1.0 else 960 // 2
            else:
                last_bneck_channels = int(
                    960 * width_mult) if width_mult > 1.0 else 960
            if RE:
                self.layer5 = _ConvBNReLU(self.in_channels,
                                          last_bneck_channels,
                                          1,
                                          norm_layer=norm_layer)
            else:
                self.layer5 = _ConvBNHswish(self.in_channels,
                                            last_bneck_channels,
                                            1,
                                            norm_layer=norm_layer)
            if not dilated:
                classifier.append(nn.AdaptiveAvgPool2d(1))
                classifier.append(nn.Conv2d(last_bneck_channels, 1280, 1))
                classifier.append(_Hswish(True))
                classifier.append(nn.Conv2d(1280, nclass, 1))
        elif mode == 'small':
            if dilated:
                last_bneck_channels = int(
                    576 // 2 * width_mult) if width_mult > 1.0 else 576 // 2
            else:
                last_bneck_channels = int(
                    576 * width_mult) if width_mult > 1.0 else 576
            if RE:
                self.layer5 = _ConvBNReLU(self.in_channels,
                                          last_bneck_channels,
                                          1,
                                          norm_layer=norm_layer)
            else:
                self.layer5 = _ConvBNHswish(self.in_channels,
                                            last_bneck_channels,
                                            1,
                                            norm_layer=norm_layer)
            if not dilated:
                classifier.append(SEModule(last_bneck_channels))
                classifier.append(nn.AdaptiveAvgPool2d(1))
                classifier.append(nn.Conv2d(last_bneck_channels, 1024, 1))
                classifier.append(_Hswish(True))
                classifier.append(nn.Conv2d(1024, nclass, 1))
        else:
            raise ValueError('Unknown mode.')
        self.mode = mode
        if not dilated:
            self.classifier = nn.Sequential(*classifier)
        self.dilated = dilated

        self._init_weights()
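
# A hedged usage sketch (assumes forward, _make_layer and _init_weights are
# defined elsewhere in the full source): the dilated variant keeps no
# classifier and is intended as a segmentation backbone.
if __name__ == '__main__':
    model = MobileNetV3(nclass=1000, mode='large', dilated=False)
    x = torch.randn(1, 3, 224, 224)
    # out = model(x)  # forward is defined elsewhere in the original source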