Example #1
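The snippets below come from different projects; as a rough common setup they assume imports along these lines (a sketch only; project-specific helpers such as BottleNeck, DenseBlock, DepthwiseSeparableConv or EfficientNet are defined elsewhere in their respective repositories):

    import torch
    import torch.nn as nn
    from collections import OrderedDict
    from inplace_abn import InPlaceABN  # https://github.com/mapillary/inplace_abn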
 def __init__(self, num_input_features, growth_rate, bottle_neck_size,
              dropout_rate, bias, memory_efficient):
     super(_DenseLayer, self).__init__()
     if bottle_neck_size > 0:
         self.bottle_neck = True
         self.add_module('bottle_neck_layer', BottleNeck(num_input_features, bottle_neck_size,
                                                         growth_rate, bias))
         self.add_module('layer', nn.Sequential(
             #nn.BatchNorm2d(bottle_neck_size * growth_rate),
             #nn.ReLU(inplace=True),
             InPlaceABN(bottle_neck_size * growth_rate),
             nn.Conv2d(bottle_neck_size * growth_rate, growth_rate,
                       kernel_size=3, stride=1, padding=1, bias=bias),
             nn.Dropout(dropout_rate, inplace=True),
         ))
     else:
         self.bottle_neck = False
         self.add_module('layer', nn.Sequential(
             #nn.BatchNorm2d(num_input_features),
             #nn.ReLU(inplace=True),
             InPlaceABN(num_input_features),
             nn.Conv2d(num_input_features, growth_rate,
                       kernel_size=3, stride=1, padding=1, bias=bias),
             nn.Dropout(dropout_rate, inplace=True),
         ))
     self.memory_efficient = memory_efficient
Example #2
 def __init__(self):
     super().__init__()
     # Separable Conv
     self.conv_1 = DepthwiseSeparableConv(128, 128, 3, padding=1)
     self.conv_2 = DepthwiseSeparableConv(128, 128, 3, padding=1)
     # Inplace BN + Leaky Relu
     self.abn_1 = InPlaceABN(128)
     self.abn_2 = InPlaceABN(128)
Example #3
 def __init__(self, cfg):
     super().__init__()
     # self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
     self.flatten = nn.Flatten()
     in_channel = 256 * int((cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION))**2
     self.first_fc = nn.Linear(in_channel, 1024)
     self.first_iabn = InPlaceABN(1024)
     self.second_fc = nn.Linear(1024, 1024)
     self.second_iabn = InPlaceABN(1024)
Example #4
 def __init__(self, num_features, bias=False):
     super(ResLayer, self).__init__()
     self.add_module('Bottle_neck', nn.Sequential(
         nn.Conv2d(num_features, 64,
                   kernel_size=1, stride=1, padding=0, bias=bias),
         InPlaceABN(64),
         nn.Conv2d(64, 64,
                   kernel_size=3, stride=1, padding=1, bias=bias),
         InPlaceABN(64),
         nn.Conv2d(64, num_features,
                   kernel_size=1, stride=1, padding=0, bias=bias)))
     self.add_module('ABN', InPlaceABN(num_features))
Example #5
    def __init__(self, growth_rate=12, DenseBlock_layer_num=(40, 40, 40),
                 bottle_neck_size=4, dropout_rate=0.2, compression_rate=0.5, num_init_features=16,
                 num_input_features=3, num_classes=10, bias=False, memory_efficient=False):
        super(DenseNet, self).__init__()
        self.features_layers = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(num_input_features,
                                num_init_features, 
                                kernel_size=5, 
                                stride=1,
                                padding=2,
                                bias=bias, )),
            #('norm0', nn.BatchNorm2d(num_init_features)),
            #('relu0', nn.ReLU(inplace=True)),
            ('abn0', InPlaceABN(num_init_features), ),
            #('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), ## for ImageNet
        ]))
        
        num_features = num_init_features
        for idx, num_layers in enumerate(DenseBlock_layer_num):
            layer = DenseBlock(num_features,
                               num_layers, 
                               growth_rate, 
                               bottle_neck_size,
                               dropout_rate, 
                               bias, 
                               memory_efficient)
            self.features_layers.add_module('DenseBlock%d' % idx, layer)
            num_features += growth_rate * num_layers
            if idx < len(DenseBlock_layer_num) - 1:
                num_output_features = int((1 - compression_rate) * num_features)
                Transition_layer = Transition(num_features, num_output_features, bias)
                self.features_layers.add_module('Transition%d' % idx, Transition_layer)
                num_features = num_output_features
        idx += 1
        #self.features_layers.add_module('norm%d' % idx, nn.BatchNorm2d(num_features))
        #self.features_layers.add_module('relu%d' % idx, nn.ReLU(inplace=True))
        self.features_layers.add_module('abn%d' % idx, InPlaceABN(num_features))
        self.features_layers.add_module('GlobalAvgPool%d' % idx, nn.AdaptiveAvgPool2d(1))
        
        self.classifier = nn.Linear(num_features, num_classes, bias=True,)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, InPlaceABN):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)
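As a quick sanity check of the channel bookkeeping above, with the default arguments (growth_rate=12, three DenseBlocks of 40 layers each, compression_rate=0.5, num_init_features=16) the feature width evolves as follows; this is a hand computation, not code from the original repository:

    # after conv0:        16 channels
    # after DenseBlock0:  16 + 40 * 12 = 496
    # after Transition0:  int(0.5 * 496) = 248
    # after DenseBlock1:  248 + 40 * 12 = 728
    # after Transition1:  int(0.5 * 728) = 364
    # after DenseBlock2:  364 + 40 * 12 = 844  -> self.classifier = nn.Linear(844, num_classes)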
Example #6
 def __init__(self, num_classes):
     super().__init__()
     self.conv_iabn_layers = nn.ModuleList([])
     for i in range(4):
         separable_conv = DepthwiseSeparableConv(in_channels=256,
                                                 out_channels=256,
                                                 kernel_size=3,
                                                 padding=1)
         self.conv_iabn_layers.append(separable_conv)
         iabn = InPlaceABN(256)
         self.conv_iabn_layers.append(iabn)
     self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
     self.last_iabn = InPlaceABN(256)
     self.last_conv = nn.Conv2d(256, num_classes + 1, kernel_size=1)
Example #7
def conv2d_ABN(ni, nf, stride, activation="leaky_relu", kernel_size=3, activation_param=1e-2, groups=1):
    activation_param = 1e-6  # NOTE: this hard-coded value overrides the activation_param argument above
    return nn.Sequential(
        nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups,
                  bias=False),
        InPlaceABN(num_features=nf, activation=activation, activation_param=activation_param)
    )
Example #8
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        stride=1,
        use_batchnorm=True,
    ):

        if use_batchnorm == "inplace" and InPlaceABN is None:
            raise RuntimeError(
                "In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. "
                + "To install see: https://github.com/mapillary/inplace_abn")
        if use_batchnorm == "inplace":
            bn = InPlaceABN(out_channels,
                            activation="leaky_relu",
                            activation_param=0.0)
            relu = nn.Identity()
        elif use_batchnorm and use_batchnorm != "inplace":
            bn = nn.BatchNorm2d(out_channels)
        else:
            bn = nn.Identity()

        relu = nn.ReLU(inplace=True)

        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=not (use_batchnorm),
        )
        super(PreActivatedConv2dReLU, self).__init__(conv, bn, relu)
Example #9
def conv_abn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1, activation="leaky_relu", activation_param=1e-3):
    if padding is None:
        padding = ((stride - 1) + dilation * (k - 1)) // 2
    return nn.Sequential(
        nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False),
        InPlaceABN(num_features=out_chs, activation=activation, activation_param=activation_param)
    )
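A short usage sketch (not part of the original snippet, and assuming the imports listed under Example #1 plus an inplace_abn build that supports the target device); the default padding formula reproduces "same"-style padding for odd kernels:

    # ((stride - 1) + dilation * (k - 1)) // 2: k=3, s=1 -> 1; k=3, s=2 -> 1; k=1 -> 0
    block = conv_abn(64, 128, k=3, stride=2)  # Conv2d(64, 128, 3, stride=2, padding=1) + InPlaceABN(128)
    x = torch.randn(1, 64, 32, 32)
    y = block(x)  # halves the spatial size: [1, 128, 16, 16]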
Example #10
 def __init__(self, num_input_features, bottle_neck_size, growth_rate, bias):
     super(BottleNeck, self).__init__()
     #self.add_module('norm', nn.BatchNorm2d(num_input_features), )
     #self.add_module('relu', nn.ReLU(inplace=True), )
     self.add_module('abn', InPlaceABN(num_input_features), )
     self.add_module('conv', nn.Conv2d(num_input_features, bottle_neck_size * growth_rate, 
                                       kernel_size=1, stride=1, bias=bias,), )
Example #11
def generate_backbone_EfficientPS(cfg):
    """
    Create an EfficientNet model base on this repository:
    https://github.com/lukemelas/EfficientNet-PyTorch

    Modify the existing Efficientnet base on the EfficientPS paper,
    ie:
    - replace BN and swish with InplaceBN and LeakyRelu
    - remove se (squeeze and excite) blocks
    Args:
    - cdg (Config) : config object
    Return:
    - backbone (nn.Module) : Modify version of the EfficentNet
    """

    if cfg.MODEL_CUSTOM.BACKBONE.LOAD_PRETRAIN:
        backbone = EfficientNet.from_pretrained('efficientnet-b{}'.format(
            cfg.MODEL_CUSTOM.BACKBONE.EFFICIENTNET_ID))
    else:
        backbone = EfficientNet.from_name('efficientnet-b{}'.format(
            cfg.MODEL_CUSTOM.BACKBONE.EFFICIENTNET_ID))

    backbone._bn0 = InPlaceABN(num_features=backbone._bn0.num_features,
                               eps=0.001)
    backbone._bn1 = InPlaceABN(num_features=backbone._bn1.num_features,
                               eps=0.001)
    backbone._swish = nn.Identity()
    for i, block in enumerate(backbone._blocks):
        # Remove SE block
        block.has_se = False
        # Additional step so that the parameter count is computed correctly
        block._se_reduce = nn.Identity()
        block._se_expand = nn.Identity()
        # Replace BN with Inplace BN (default activation is leaky relu)
        if '_bn0' in [name for name, layer in block.named_children()]:
            block._bn0 = InPlaceABN(num_features=block._bn0.num_features,
                                    eps=0.001)
        block._bn1 = InPlaceABN(num_features=block._bn1.num_features,
                                eps=0.001)
        block._bn2 = InPlaceABN(num_features=block._bn2.num_features,
                                eps=0.001)

        # Remove swish activation since Inplace BN contains the activation layer
        block._swish = nn.Identity()

    return backbone
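A minimal usage sketch, assuming a config object that exposes the two fields read above (the field names are taken from the snippet; everything else is illustrative):

    from types import SimpleNamespace

    cfg = SimpleNamespace(MODEL_CUSTOM=SimpleNamespace(BACKBONE=SimpleNamespace(
        LOAD_PRETRAIN=False,   # use EfficientNet.from_name rather than from_pretrained
        EFFICIENTNET_ID=0)))   # -> 'efficientnet-b0'

    backbone = generate_backbone_EfficientPS(cfg)
    # all BN layers are now InPlaceABN, swish is removed, and the SE blocks are disabled
    assert all(not block.has_se for block in backbone._blocks)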
Example #12
 def __init__(self, num_input_features, num_output_features, bias):
     super(Transition, self).__init__()
     #self.add_module('norm', nn.BatchNorm2d(num_input_features))
     #self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module('abn', InPlaceABN(num_input_features), )
     self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                       kernel_size=1, stride=1, bias=bias,))
     self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
Example #13
 def __init__(self, cfg, input_shape):
     super().__init__(cfg, input_shape)
     # Modified convolution (depthwise separable) followed by in-place batch norm
     in_channels = input_shape[0].channels
     self.conv = DepthwiseSeparableConv(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        padding=1)
     self.iabn = InPlaceABN(in_channels)
Example #14
    def __init__(self, num_layers=(2, 4, 6, 4), num_features=(64, 128, 256, 512), bias=False):
        super(ResNet, self).__init__()
        self.features_layers = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(3, num_features[0],
                                                                               kernel_size=7, stride=1,
                                                                               padding=3, bias=bias)),
                                                          ('ABN0', InPlaceABN(num_features[0]))]))
        self.features_layers.add_module('ResBlock0', ResBlock(num_layers[0], num_features[0]))
        pre_num_feature = num_features[0]
        for i, (num_layer, num_feature) in enumerate(zip(num_layers[1:], num_features[1:]), 1):
            self.features_layers.add_module('conv%d' % i, nn.Conv2d(pre_num_feature, num_feature,
                                                                     kernel_size=3, stride=2,
                                                                     padding=1, bias=bias))
            self.features_layers.add_module('ABN%d' % i, InPlaceABN(num_feature))
            #self.features_layers.add_module('pool%d'%i,     nn.AvgPool2d(2,2) )
        
            self.features_layers.add_module('ResBlock%d'%i, ResBlock(num_layer, num_feature) )
            pre_num_feature = num_feature
#         self.features_layers.add_module('conv2', nn.Conv2d(128, 256,
#                                                            kernel_size=3, stride=1,
#                                                            padding = 1, bias =False) )
#         self.features_layers.add_module('pool1',     nn.AvgPool2d(2,2) )
        
#         self.features_layers.add_module('ResBlock2', ResBlock(8, 256) )

#         self.features_layers.add_module('conv3', nn.Conv2d(256,512,
#                                                            kernel_size=3, stride=1,
#                                                            padding = 1, bias =False) )
#         self.features_layers.add_module('pool2',     nn.AvgPool2d(2,2) )
#         self.features_layers.add_module('ResBlock3', ResBlock(8, 512) )
        
        self.features_layers.add_module('GlobalPool', nn.AdaptiveAvgPool2d(1) )
        self.classifier = nn.Linear(num_features[-1],10,bias=False)
        
        
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, InPlaceABN):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.weight, 0)
Example #15
 def __init__(self):
     super().__init__()
     options = {'in_channels': 256, 'out_channels': 256, 'kernel_size': 3}
     self.conv_first = DepthwiseSeparableConv(dilation=(1, 6),
                                              padding=(1, 6),
                                              **options)
     self.iabn_first = InPlaceABN(256)
     # Branch 1
     self.conv_branch_1 = DepthwiseSeparableConv(padding=1, **options)
     self.iabn_branch_1 = InPlaceABN(256)
     # Branch 2
     self.conv_branch_2 = DepthwiseSeparableConv(dilation=(6, 21),
                                                 padding=(6, 21),
                                                 **options)
     self.iabn_branch_2 = InPlaceABN(256)
     #Branch 3
     self.conv_branch_3 = DepthwiseSeparableConv(dilation=(18, 15),
                                                 padding=(18, 15),
                                                 **options)
     self.iabn_branch_3 = InPlaceABN(256)
     # Branch 4
     self.conv_branch_4 = DepthwiseSeparableConv(dilation=(6, 3),
                                                 padding=(6, 3),
                                                 **options)
     self.iabn_branch_4 = InPlaceABN(256)
     # Last conv
     # There is some mismatch in the paper about the dimensions of this conv.
     # The text says "This tensor is then finally passed through a
     # 1×1 convolution with 256 output channels and forms the output of the
     # DPC module.", but the overall schema shows an output of 128 and the
     # MC module schema also shows an input of 256.
     # In order to have 512 channels at the concatenation of all layers,
     # 128 output channels are chosen here.
     self.conv_last = nn.Conv2d(1280, 128, 1)
     self.iabn_last = InPlaceABN(128)
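For reference, the channel arithmetic implied by the comment above (a hand computation, assuming the five tensors are concatenated along the channel dimension before conv_last):

     # conv_first + 4 branches, 256 channels each -> cat gives 5 * 256 = 1280 channels,
     # which matches nn.Conv2d(1280, 128, 1); four such 128-channel outputs concatenated
     # later yield the 512 channels mentioned in the comment (4 * 128 = 512).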
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding=0,
                 stride=1,
                 use_batchnorm=True,
                 act="relu",
                 **batchnorm_params):

        super().__init__()

        layers = [
            nn.Conv2d(in_channels,
                      out_channels,
                      kernel_size,
                      stride=stride,
                      padding=padding,
                      bias=not (use_batchnorm))
        ]

        if use_batchnorm == 'inplace':
            try:
                from inplace_abn import InPlaceABN
            except ImportError:
                raise RuntimeError(
                    "In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. To install see: https://github.com/mapillary/inplace_abn"
                )

            layers.append(
                InPlaceABN(out_channels,
                           activation='leaky_relu',
                           activation_param=0.0,
                           **batchnorm_params))
        elif use_batchnorm:
            layers.append(nn.BatchNorm2d(out_channels, **batchnorm_params))

        if act == "elu":
            layers.append(nn.ELU(True))
        elif act == "swish":
            layers.append(Swish())
        else:
            layers.append(nn.ReLU(True))

        self.block = nn.Sequential(*layers)
Example #17
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 **kwargs):

        super(DepthwiseSeparableConv, self).__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'
        self.depthwise_conv = nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size,
                                        stride=stride,
                                        padding=padding,
                                        dilation=dilation,
                                        groups=in_channels,
                                        **kwargs)

        self.iabn = InPlaceABN(in_channels)

        self.pointwise_conv = nn.Conv2d(in_channels, out_channels, 1, **kwargs)
Example #18
    def __init__(self):
        super(SKUNET100_DICE_BN_NEW_MOD4_EX3, self).__init__()

        self.relu = nn.ReLU(inplace=True)
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.upsample = nn.Upsample(scale_factor=2, mode="trilinear")
        self.upsamplez = nn.Upsample(scale_factor=(1, 2, 2), mode="trilinear")
        #        self.act1 = my_act()

        self.in_layer = nn.Conv3d(1, 32, kernel_size=5, padding=2)  # 32,320
        self.in_layer_bn = InPlaceABN(32)

        self.lconvlayer1 = nn.Conv3d(32,
                                     32,
                                     kernel_size=(5, 5, 5),
                                     stride=(1, 2, 2),
                                     padding=(2, 2, 2))  # 32,160
        self.lconvlayer1_bn = InPlaceABN(32)

        self.lconvlayer2 = nn.Conv3d(32,
                                     64,
                                     kernel_size=(5, 5, 5),
                                     stride=(1, 2, 2),
                                     padding=(2, 2, 2))  # 32,80
        self.lconvlayer2_bn = InPlaceABN(64)

        self.lconvlayer3 = nn.Conv3d(64,
                                     64,
                                     kernel_size=4,
                                     stride=2,
                                     padding=1)  # 16,40
        self.lconvlayer3_bn = InPlaceABN(64)

        self.lconvlayer4 = nn.Conv3d(64,
                                     128,
                                     kernel_size=4,
                                     stride=2,
                                     padding=1)  # 8,20
        self.lconvlayer4_bn = InPlaceABN(128)

        self.lconvlayer5 = nn.Conv3d(128,
                                     128,
                                     kernel_size=4,
                                     stride=2,
                                     padding=1)  # 4,10
        self.lconvlayer5_bn = InPlaceABN(128)

        self.lconvlayer6 = nn.Conv3d(128,
                                     128,
                                     kernel_size=4,
                                     stride=2,
                                     padding=1)  # 2, 5
        self.lconvlayer6_bn = InPlaceABN(128)

        # self.lconvlayer7 = nn.Conv3d(128, 256, kernel_size=4, stride=2, padding=1)  # 2, 5
        # self.lconvlayer7_bn = InPlaceABN(256)
        #
        # self.rconvTlayer7 = nn.Conv3d(256, 128, kernel_size=3, padding=1)
        self.rconvlayer7 = nn.Conv3d(128, 128, kernel_size=3, padding=1)
        self.rconvlayer7_bn = InPlaceABN(128)

        self.rconvTlayer6 = nn.Conv3d(128, 128, kernel_size=3, padding=1)
        self.rconvTlayer6_bn = InPlaceABN(128)
        self.rconvlayer6 = nn.Conv3d(128, 128, kernel_size=3, padding=1)
        self.rconvlayer6_bn = InPlaceABN(128)

        self.rconvTlayer5 = nn.Conv3d(128, 128, kernel_size=3, padding=1)
        self.rconvTlayer5_bn = InPlaceABN(128)
        self.rconvlayer5 = nn.Conv3d(128, 128, kernel_size=3, padding=1)
        self.rconvlayer5_bn = InPlaceABN(128)

        self.rconvTlayer4 = nn.Conv3d(128, 64, kernel_size=5, padding=2)
        self.rconvTlayer4_bn = InPlaceABN(64)
        self.rconvlayer4 = nn.Conv3d(64, 64, kernel_size=5, padding=2)
        self.rconvlayer4_bn = InPlaceABN(64)

        self.rconvTlayer3 = nn.Conv3d(64, 64, kernel_size=5, padding=2)
        self.rconvTlayer3_bn = InPlaceABN(64)
        self.rconvlayer3 = nn.Conv3d(64, 64, kernel_size=5, padding=2)
        self.rconvlayer3_bn = InPlaceABN(64)

        #        self.rconvTlayer2 = nn.ConvTranspose3d(64, 32, kernel_size = (1,2,2), stride = (1,2,2))
        self.rconvTlayer2 = nn.Conv3d(64, 32, kernel_size=5, padding=2)
        self.rconvTlayer2_bn = InPlaceABN(32)
        self.rconvlayer2 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
        self.rconvlayer2_bn = InPlaceABN(32)

        self.rconvTlayer1 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
        self.rconvTlayer1_bn = InPlaceABN(32)
        #        self.rconvTlayer1 = nn.ConvTranspose3d(32, 32, kernel_size = (1,2,2), stride = (1,2,2))
        self.rconvlayer1 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
        self.rconvlayer1_bn = InPlaceABN(32)

        self.out_layer = nn.Conv3d(32, 1, kernel_size=1, stride=1)
Example #19
    def __init__(self, in_feature_shape):
        """
        Args:
        - in_feature_shape (List[int]) : sizes of the features at the different levels
        """
        super().__init__()
        # Channel counts are the ones given in the EfficientPS paper.
        # Depending on the EfficientNet model chosen, the number of
        # channels will change.
        # x4 size [B, 40, H, W] (input 40 channels)
        # Bottom up path layers
        self.conv_b_up_x4 = Conv2d(in_feature_shape[1], 256, 1)
        self.iabn_b_up_x4 = InPlaceABN(256)

        # Top down path layers
        self.conv_t_dn_x4 = Conv2d(in_feature_shape[1], 256, 1)
        self.iabn_t_dn_x4 = InPlaceABN(256)

        # x8 size [B, 64, H, W] (input 64 channels)
        # Bottom up path layers
        self.conv_b_up_x8 = Conv2d(in_feature_shape[2], 256, 1)
        self.iabn_b_up_x8 = InPlaceABN(256)

        # Top down path layers
        self.conv_t_dn_x8 = Conv2d(in_feature_shape[2], 256, 1)
        self.iabn_t_dn_x8 = InPlaceABN(256)

        # x16 size [B, 176, H, W] (input 176 channels)
        # In the paper they took the 5 block of efficient net ie 128 channels
        # But taking last block seem more pertinent and was already implemented
        # Skipping to id 3 since block 4 does not interest us
        # Bottom up path layers
        self.conv_b_up_x16 = Conv2d(in_feature_shape[3], 256, 1)
        self.iabn_b_up_x16 = InPlaceABN(256)

        # Top down path layers
        self.conv_t_dn_x16 = Conv2d(in_feature_shape[3], 256, 1)
        self.iabn_t_dn_x16 = InPlaceABN(256)

        # x32 size [B, 2048, H, W] (input 2048 channels)
        # Bottom up path layers
        self.conv_b_up_x32 = Conv2d(in_feature_shape[4], 256, 1)
        self.iabn_b_up_x32 = InPlaceABN(256)

        # Top down path layers
        self.conv_t_dn_x32 = Conv2d(in_feature_shape[4], 256, 1)
        self.iabn_t_dn_x32 = InPlaceABN(256)

        # Separable Conv and Inplace BN at the output of the FPN
        # x4
        self.depth_wise_conv_x4 = DepthwiseSeparableConv(in_channels=256,
                                                         out_channels=256,
                                                         kernel_size=3,
                                                         padding=1)
        self.iabn_out_x4 = InPlaceABN(256)
        # x8
        self.depth_wise_conv_x8 = DepthwiseSeparableConv(in_channels=256,
                                                         out_channels=256,
                                                         kernel_size=3,
                                                         padding=1)
        self.iabn_out_x8 = InPlaceABN(256)
        # x16
        self.depth_wise_conv_x16 = DepthwiseSeparableConv(in_channels=256,
                                                          out_channels=256,
                                                          kernel_size=3,
                                                          padding=1)
        self.iabn_out_x16 = InPlaceABN(256)
        # x32
        self.depth_wise_conv_x32 = DepthwiseSeparableConv(in_channels=256,
                                                          out_channels=256,
                                                          kernel_size=3,
                                                          padding=1)
        self.iabn_out_x32 = InPlaceABN(256)
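A minimal sketch of how in_feature_shape would be filled in (only indices 1 to 4 are read above; the channel counts are the ones quoted in the comments, while the list layout and the class name FPNModule are assumptions for illustration):

    # indices 1..4 are the x4, x8, x16 and x32 channel counts quoted in the comments above
    in_feature_shape = [None, 40, 64, 176, 2048]  # index 0 is not read by this __init__
    fpn = FPNModule(in_feature_shape)             # FPNModule: hypothetical name for the class above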