Example no. 1
    def __init__(self, input_num, num1, num2, dilation_rate, drop_out,
                 bn_type):
        super(_DenseAsppBlock, self).__init__()
        self.add_module(
            'norm1',
            ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_features=input_num))
        self.add_module('relu1', nn.ReLU(inplace=False))
        self.add_module(
            'conv1',
            nn.Conv2d(in_channels=input_num, out_channels=num1,
                      kernel_size=1))

        self.add_module(
            'norm2',
            ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_features=num1))
        self.add_module('relu2', nn.ReLU(inplace=False))
        self.add_module(
            'conv2',
            nn.Conv2d(in_channels=num1,
                      out_channels=num2,
                      kernel_size=3,
                      dilation=dilation_rate,
                      padding=dilation_rate))

        self.drop_rate = drop_out
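
For context, a minimal sketch of the matching forward pass, assuming the block subclasses nn.Sequential and that F is torch.nn.functional (self.drop_rate is consumed functionally here rather than through a stored nn.Dropout module):

    def forward(self, _input):
        # Run norm1 -> relu1 -> conv1 -> norm2 -> relu2 -> conv2 in order.
        feature = super(_DenseAsppBlock, self).forward(_input)
        if self.drop_rate > 0:
            # Spatial dropout, active only while training.
            feature = F.dropout2d(feature, p=self.drop_rate,
                                  training=self.training)
        return feature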
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              bn_type=None,
              bn_momentum=0.1):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
         planes, momentum=bn_momentum)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
         planes, momentum=bn_momentum)
     self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
     self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
         planes * 4, momentum=bn_momentum)
     self.relu = nn.ReLU(inplace=False)
     self.relu_in = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
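
A hedged sketch of the corresponding residual forward pass; the out-of-place self.relu keeps intermediate activations intact, while the in-place self.relu_in is safe after the residual addition:

 def forward(self, x):
     residual = x
     out = self.relu(self.bn1(self.conv1(x)))
     out = self.relu(self.bn2(self.conv2(out)))
     out = self.bn3(self.conv3(out))
     if self.downsample is not None:
         residual = self.downsample(x)
     # In-place ReLU is safe here: the pre-activation sum is not reused.
     return self.relu_in(out + residual)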
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              bn_type=None):
     super(BasicBlock, self).__init__()
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     # Both self.conv1 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.relu = nn.ReLU(inplace=False)
     self.relu_in = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.downsample = downsample
     self.stride = stride
Example no. 4
    def __init__(self, num_class=150, fc_dim=4096, bn_type=None):
        super(PPMBilinearDeepsup, self).__init__()
        self.bn_type = bn_type
        pool_scales = (1, 2, 3, 6)
        self.ppm = []
        # assert bn_type == 'syncbn' or not self.training
        # Torch BN can't handle feature map size with 1x1.
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(512),
                nn.ReLU(inplace=True)
            ))

        self.ppm = nn.ModuleList(self.ppm)
        self.cbr_deepsup = _ConvBatchNormReluBlock(fc_dim // 2, fc_dim // 4, 3, 1, bn_type=bn_type)
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            ModuleHelper.BatchNorm2d(bn_type=bn_type)(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(512, num_class, kernel_size=1)
        )
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.dropout_deepsup = nn.Dropout2d(0.1)
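
A hedged sketch of how these modules are typically wired together, assuming F is torch.nn.functional and conv_out is the list of backbone feature maps (the deep-supervision branch is omitted for brevity):

    def forward(self, conv_out):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            # Pool at each scale, then upsample back to conv5's resolution.
            ppm_out.append(F.interpolate(pool_scale(conv5),
                                         (input_size[2], input_size[3]),
                                         mode='bilinear',
                                         align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)  # fc_dim + len(pool_scales) * 512
        return self.conv_last(ppm_out)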
Example no. 5
 def __init__(self,
              in_channels,
              channels,
              kernel_size,
              stride=(1, 1),
              padding=(0, 0),
              dilation=(1, 1),
              groups=1,
              bias=True,
              radix=2,
              reduction_factor=4,
              rectify=False,
              rectify_avg=False,
              bn_type=None,
              dropblock_prob=0.0,
              **kwargs):
     super(SplAtConv2d, self).__init__()
     padding = _pair(padding)
     self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
     self.rectify_avg = rectify_avg
     inter_channels = max(in_channels * radix // reduction_factor, 32)
     self.radix = radix
     self.cardinality = groups
     self.channels = channels
     self.dropblock_prob = dropblock_prob
     if self.rectify:
         from rfconv import RFConv2d
         self.conv = RFConv2d(in_channels,
                              channels * radix,
                              kernel_size,
                              stride,
                              padding,
                              dilation,
                              groups=groups * radix,
                              bias=bias,
                              average_mode=rectify_avg,
                              **kwargs)
     else:
         self.conv = Conv2d(in_channels,
                            channels * radix,
                            kernel_size,
                            stride,
                            padding,
                            dilation,
                            groups=groups * radix,
                            bias=bias,
                            **kwargs)
     self.use_bn = bn_type is not None
     self.bn0 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(channels * radix)
     self.relu = ReLU(inplace=False)
     self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(inter_channels)
     self.fc2 = Conv2d(inter_channels,
                       channels * radix,
                       1,
                       groups=self.cardinality)
     if dropblock_prob > 0.0:
         self.dropblock = DropBlock2D(dropblock_prob, 3)
     self.rsoftmax = rSoftMax(radix, groups)
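
A hypothetical usage sketch (shapes illustrative; 'torchbn' is assumed to be ModuleHelper's key for plain nn.BatchNorm2d). The layer behaves like a drop-in 3x3 convolution whose output is a radix-weighted combination of grouped branches:

layer = SplAtConv2d(64, 64, kernel_size=3, padding=1,
                    radix=2, groups=1, bn_type='torchbn')
out = layer(torch.randn(2, 64, 56, 56))  # -> torch.Size([2, 64, 56, 56])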
    def _make_fuse_layers(self, bn_type, bn_momentum=0.1):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(num_inchannels[j],
                                      num_inchannels[i],
                                      1,
                                      1,
                                      0,
                                      bias=False),
                            ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                                num_inchannels[i], momentum=bn_momentum),
                        ))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,
                                              2,
                                              1,
                                              bias=False),
                                    ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                                        num_outchannels_conv3x3,
                                        momentum=bn_momentum)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(num_inchannels[j],
                                              num_outchannels_conv3x3,
                                              3,
                                              2,
                                              1,
                                              bias=False),
                                    ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                                        num_outchannels_conv3x3,
                                        momentum=bn_momentum),
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
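
A hedged sketch of how the returned fuse layers are consumed, assuming the module defines self.relu and F is torch.nn.functional; entry [i][j] maps branch j onto branch i's resolution, with None on the diagonal:

    def _fuse(self, x):  # hypothetical helper mirroring the usual forward
        x_fuse = []
        for i, fuse_layer in enumerate(self.fuse_layers):
            y = x[i]
            for j in range(self.num_branches):
                if j == i:
                    continue
                z = fuse_layer[j](x[j])
                if j > i:
                    # Lower-resolution branch: upsample after the 1x1 conv.
                    z = F.interpolate(z, size=x[i].shape[2:],
                                      mode='bilinear', align_corners=False)
                y = y + z
            x_fuse.append(self.relu(y))
        return x_fuse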
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              norm_type=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.relu = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes)
     self.downsample = downsample
     self.stride = stride
    def _make_head(self, pre_stage_channels, bn_type, bn_momentum):
        head_block = Bottleneck
        head_channels = [32, 64, 128, 256]

        Log.info("pre_stage_channels: {}".format(pre_stage_channels))
        Log.info("head_channels: {}".format(head_channels))

        # Increasing the #channels on each resolution
        # from C, 2C, 4C, 8C to 128, 256, 512, 1024
        incre_modules = []
        for i, channels in enumerate(pre_stage_channels):
            incre_module = self._make_layer(head_block,
                                            channels,
                                            head_channels[i],
                                            1,
                                            bn_type=bn_type,
                                            bn_momentum=bn_momentum)
            incre_modules.append(incre_module)
        incre_modules = nn.ModuleList(incre_modules)

        # downsampling modules
        downsamp_modules = []
        for i in range(len(pre_stage_channels) - 1):
            in_channels = head_channels[i] * head_block.expansion
            out_channels = head_channels[i + 1] * head_block.expansion

            downsamp_module = nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=3,
                          stride=2,
                          padding=1),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                    out_channels, momentum=bn_momentum),
                nn.ReLU(inplace=False))
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)

        final_layer = nn.Sequential(
            nn.Conv2d(in_channels=head_channels[3] * head_block.expansion,
                      out_channels=2048,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            ModuleHelper.BatchNorm2d(bn_type=bn_type)(2048,
                                                      momentum=bn_momentum),
            nn.ReLU(inplace=False))
        return incre_modules, downsamp_modules, final_layer
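
A hedged sketch of how the three returned pieces are typically chained over the multi-resolution outputs y_list (highest resolution first):

        # Hypothetical consumption inside the network's forward:
        y = incre_modules[0](y_list[0])
        for i in range(len(downsamp_modules)):
            # Raise the next branch's channels, then add the downsampled
            # running feature map.
            y = incre_modules[i + 1](y_list[i + 1]) + downsamp_modules[i](y)
        y = final_layer(y)  # 2048-channel classification feature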
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    norm_type=None,
                    rm_last_stride=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if rm_last_stride:
                stride = 1

            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(norm_type=norm_type)(planes *
                                                              block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  norm_type=norm_type))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_type=norm_type))

        return nn.Sequential(*layers)
Example no. 10
 def __init__(self, inplanes, outplanes, kernel_size, stride, padding=1, dilation=1, relu=True, bn_type=None):
     super(_ConvBatchNormReluBlock, self).__init__()
     self.relu = relu
     self.conv = nn.Conv2d(in_channels=inplanes, out_channels=outplanes,
                           kernel_size=kernel_size, stride=stride,
                           padding=padding, dilation=dilation, bias=False)
     self.bn = ModuleHelper.BatchNorm2d(bn_type=bn_type)(num_features=outplanes)
     self.relu_f = nn.ReLU()
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              bn_type=None,
              bn_momentum=0.1):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
         planes, momentum=bn_momentum)
     self.relu = nn.ReLU(inplace=False)
     self.relu_in = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
         planes, momentum=bn_momentum)
     self.downsample = downsample
     self.stride = stride
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer, bn_type, bn_momentum):

        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            nn.Conv2d(num_channels_pre_layer[i],
                                      num_channels_cur_layer[i],
                                      3,
                                      1,
                                      1,
                                      bias=False),
                            ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                                num_channels_cur_layer[i],
                                momentum=bn_momentum), nn.ReLU(inplace=False)))
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(
                        nn.Sequential(
                            nn.Conv2d(inchannels,
                                      outchannels,
                                      3,
                                      2,
                                      1,
                                      bias=False),
                            ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                                outchannels, momentum=bn_momentum),
                            nn.ReLU(inplace=False)))
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)
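
A hedged sketch of how the transition list is applied between stages (a None entry means the branch passes through unchanged; extra branches are created from the previous stage's lowest resolution):

        # Hypothetical consumption, with x the previous stage's output list:
        x_list = []
        for i, transition in enumerate(self.transition1):
            src = x[i] if i < len(x) else x[-1]
            x_list.append(src if transition is None else transition(src))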
Example no. 13
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   with_dcn=False,
                   dcn_offset_lr_mult=0.1,
                   use_regular_conv_on_stride=False,
                   use_modulated_dcn=False,
                   bn_type=None):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.Conv2d(inplanes,
                      planes * block.expansion,
                      kernel_size=1,
                      stride=stride,
                      bias=False),
            ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes *
                                                      block.expansion),
        )

    layers = []
    layers.append(
        block(inplanes,
              planes,
              stride,
              dilation,
              downsample,
              style=style,
              with_cp=with_cp,
              with_dcn=with_dcn,
              dcn_offset_lr_mult=dcn_offset_lr_mult,
              use_regular_conv_on_stride=use_regular_conv_on_stride,
              use_modulated_dcn=use_modulated_dcn,
              bn_type=bn_type))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes,
                  planes,
                  1,
                  dilation,
                  style=style,
                  with_cp=with_cp,
                  with_dcn=with_dcn,
                  dcn_offset_lr_mult=dcn_offset_lr_mult,
                  use_regular_conv_on_stride=use_regular_conv_on_stride,
                  use_modulated_dcn=use_modulated_dcn,
                  bn_type=bn_type))

    return nn.Sequential(*layers)
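
For scale, a hypothetical call producing a ResNet-101-style third stage (argument values illustrative only):

layer3 = make_res_layer(Bottleneck, 512, 256, 23, stride=2,
                        with_dcn=True, bn_type='torchbn')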
Example no. 14
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              dilation=1,
              downsample=None,
              style='pytorch',
              with_cp=False,
              bn_type=None):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(inplanes, planes, stride, dilation)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.relu = nn.ReLU(inplace=False)
     self.relu_in = nn.ReLU(inplace=True)
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
     self.downsample = downsample
     self.stride = stride
     self.dilation = dilation
     assert not with_cp
Example no. 15
 def __init__(self, num_input_features, num_output_features, bn_type):
     super(_Transition, self).__init__()
     self.add_module('relu', nn.ReLU(inplace=False))
     self.add_module(
         'conv',
         nn.Conv2d(num_input_features,
                   num_output_features,
                   kernel_size=1,
                   stride=1,
                   bias=False))
     self.add_module(
         'norm',
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(
             num_features=num_output_features))
Example no. 16
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              bn_type=None):
     super(Bottleneck, self).__init__()
     width = int(planes * (base_width / 64.)) * groups
     # Both self.conv2 and self.downsample layers downsample the input when stride != 1
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(width)
     self.conv3 = conv1x1(width, planes * self.expansion)
     self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes *
                                                          self.expansion)
     self.relu = nn.ReLU(inplace=False)
     self.relu_in = nn.ReLU(inplace=True)
     self.downsample = downsample
     self.stride = stride
Example no. 17
 def __init__(self, num_classes, bn_type=None):
     super(DeepLabHead, self).__init__()
     # auxiliary loss
     self.layer_dsn = nn.Sequential(
         nn.Conv2d(1024, 256, kernel_size=3, stride=1, padding=1),
         ModuleHelper.BNReLU(256, bn_type=bn_type),
         nn.Conv2d(256,
                   num_classes,
                   kernel_size=1,
                   stride=1,
                   padding=0,
                   bias=True))
     # main pipeline
     self.layer_aspp = ASPPModule(2048, 512, bn_type=bn_type)
     self.refine = nn.Sequential(
         nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1,
                   bias=False),
         ModuleHelper.BatchNorm2d(bn_type=bn_type)(512),
         nn.Conv2d(512, num_classes, kernel_size=1, stride=1, bias=True))
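
A hedged sketch of the corresponding forward, assuming the backbone returns feature maps where x[-2] has 1024 channels (auxiliary branch) and x[-1] has 2048 (main branch):

 def forward(self, x):
     # Auxiliary prediction for deep supervision.
     x_dsn = self.layer_dsn(x[-2])
     # Main pipeline: ASPP context aggregation, then refinement.
     x = self.refine(self.layer_aspp(x[-1]))
     return x, x_dsn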
Example no. 18
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    dilate=False,
                    bn_type=None):
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes *
                                                          block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes,
                  planes,
                  stride,
                  downsample,
                  self.groups,
                  self.base_width,
                  previous_dilation,
                  bn_type=bn_type))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation,
                      bn_type=bn_type))

        return nn.Sequential(*layers)
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1,
                         bn_type=None,
                         bn_momentum=0.1):
        downsample = None
        if stride != 1 or \
           self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                    num_channels[branch_index] * block.expansion,
                    momentum=bn_momentum),
            )

        layers = []
        layers.append(
            block(self.num_inchannels[branch_index],
                  num_channels[branch_index],
                  stride,
                  downsample,
                  bn_type=bn_type,
                  bn_momentum=bn_momentum))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.num_inchannels[branch_index],
                      num_channels[branch_index],
                      bn_type=bn_type,
                      bn_momentum=bn_momentum))

        return nn.Sequential(*layers)
    def _make_layer(self,
                    block,
                    inplanes,
                    planes,
                    blocks,
                    stride=1,
                    bn_type=None,
                    bn_momentum=0.1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                    planes * block.expansion, momentum=bn_momentum))

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride,
                  downsample,
                  bn_type=bn_type,
                  bn_momentum=bn_momentum))

        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(inplanes,
                      planes,
                      bn_type=bn_type,
                      bn_momentum=bn_momentum))

        return nn.Sequential(*layers)
Example no. 21
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    dilation=1,
                    bn_type=None,
                    dropblock_prob=0.0,
                    is_first=True):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                if dilation == 1:
                    down_layers.append(
                        nn.AvgPool2d(kernel_size=stride,
                                     stride=stride,
                                     ceil_mode=True,
                                     count_include_pad=False))
                else:
                    down_layers.append(
                        nn.AvgPool2d(kernel_size=1,
                                     stride=1,
                                     ceil_mode=True,
                                     count_include_pad=False))
                down_layers.append(
                    nn.Conv2d(self.inplanes,
                              planes * block.expansion,
                              kernel_size=1,
                              stride=1,
                              bias=False))
            else:
                down_layers.append(
                    nn.Conv2d(self.inplanes,
                              planes * block.expansion,
                              kernel_size=1,
                              stride=stride,
                              bias=False))
            down_layers.append(
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes *
                                                          block.expansion))
            downsample = nn.Sequential(*down_layers)

        layers = []
        if dilation == 1 or dilation == 2:
            layers.append(
                block(self.inplanes,
                      planes,
                      stride,
                      downsample=downsample,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=1,
                      is_first=is_first,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      bn_type=bn_type,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(
                block(self.inplanes,
                      planes,
                      stride,
                      downsample=downsample,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=2,
                      is_first=is_first,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      bn_type=bn_type,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=dilation,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      bn_type=bn_type,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))

        return nn.Sequential(*layers)
Example no. 22
    def __init__(self,
                 block,
                 layers,
                 radix=1,
                 groups=1,
                 bottleneck_width=64,
                 num_classes=1000,
                 dilated=False,
                 dilation=1,
                 deep_stem=False,
                 stem_width=64,
                 avg_down=False,
                 rectified_conv=False,
                 rectify_avg=False,
                 avd=False,
                 avd_first=False,
                 final_drop=0.0,
                 dropblock_prob=0,
                 last_gamma=False,
                 bn_type=None):
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        # ResNet-D params
        self.inplanes = stem_width * 2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        # ResNeSt params
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first

        super(ResNeSt, self).__init__()
        self.rectified_conv = rectified_conv
        self.rectify_avg = rectify_avg
        if rectified_conv:
            from rfconv import RFConv2d
            conv_layer = RFConv2d
        else:
            conv_layer = nn.Conv2d
        conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
        if deep_stem:
            self.conv1 = nn.Sequential(
                conv_layer(3,
                           stem_width,
                           kernel_size=3,
                           stride=2,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(stem_width),
                nn.ReLU(inplace=False),
                conv_layer(stem_width,
                           stem_width,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
                ModuleHelper.BatchNorm2d(bn_type=bn_type)(stem_width),
                nn.ReLU(inplace=False),
                conv_layer(stem_width,
                           stem_width * 2,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
            )
        else:
            self.conv1 = conv_layer(3,
                                    64,
                                    kernel_size=7,
                                    stride=2,
                                    padding=3,
                                    bias=False,
                                    **conv_kwargs)
        self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change.
        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       bn_type=bn_type,
                                       is_first=False)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       bn_type=bn_type)

        if dilated or dilation == 4:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           stride=1,
                                           dilation=2,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           stride=1,
                                           dilation=4,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
        elif dilation == 2:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           stride=2,
                                           dilation=1,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           stride=1,
                                           dilation=2,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
        else:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           stride=2,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           stride=2,
                                           bn_type=bn_type,
                                           dropblock_prob=dropblock_prob)
        self.avgpool = GlobalAvgPool2d()
        self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(
                    m, ModuleHelper.BatchNorm2d(bn_type=bn_type,
                                                ret_cls=True)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
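
A hypothetical ResNeSt-50-style configuration, to show how the constructor flags combine (exact values vary between releases):

model = ResNeSt(Bottleneck, [3, 4, 6, 3], radix=2, groups=1,
                bottleneck_width=64, deep_stem=True, stem_width=32,
                avg_down=True, avd=True, avd_first=False,
                bn_type='torchbn')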
Example no. 23
    def __init__(self, block, layers, deep_base=True, bn_type=None):
        super(DCNResNet, self).__init__()
        # if depth not in self.arch_settings:
        #     raise KeyError('invalid depth {} for resnet'.format(depth))
        # assert num_stages >= 1 and num_stages <= 4
        # block, stage_blocks = self.arch_settings[depth]
        # stage_blocks = stage_blocks[:num_stages]
        # assert len(strides) == len(dilations) == num_stages
        # assert max(out_indices) < num_stages
        self.style = 'pytorch'
        self.inplanes = 128 if deep_base else 64
        if deep_base:
            self.resinit = nn.Sequential(
                OrderedDict([
                    ('conv1',
                     nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)),
                    ('bn1', ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
                    ('relu1', nn.ReLU(inplace=False)),
                    ('conv2',
                     nn.Conv2d(64,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn2', ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
                    ('relu2', nn.ReLU(inplace=False)),
                    ('conv3',
                     nn.Conv2d(64,
                               128,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn3',
                     ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)),
                    ('relu3', nn.ReLU(inplace=False))
                ]))
        else:
            self.resinit = nn.Sequential(
                OrderedDict([
                    ('conv1',
                     nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)),
                    ('bn1',
                     ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)),
                    ('relu1', nn.ReLU(inplace=False))
                ]))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = make_res_layer(block,
                                     self.inplanes,
                                     64,
                                     layers[0],
                                     style=self.style,
                                     with_dcn=False,
                                     use_modulated_dcn=False,
                                     bn_type=bn_type)

        self.layer2 = make_res_layer(block,
                                     256,
                                     128,
                                     layers[1],
                                     stride=2,
                                     style=self.style,
                                     with_dcn=False,
                                     use_modulated_dcn=False,
                                     bn_type=bn_type)

        self.layer3 = make_res_layer(block,
                                     512,
                                     256,
                                     layers[2],
                                     stride=2,
                                     style=self.style,
                                     with_dcn=True,
                                     use_modulated_dcn=False,
                                     bn_type=bn_type)

        self.layer4 = make_res_layer(block,
                                     1024,
                                     512,
                                     layers[3],
                                     stride=2,
                                     style=self.style,
                                     with_dcn=True,
                                     use_modulated_dcn=False,
                                     bn_type=bn_type)
Example no. 24
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 radix=1,
                 cardinality=1,
                 bottleneck_width=64,
                 avd=False,
                 avd_first=False,
                 dilation=1,
                 is_first=False,
                 rectified_conv=False,
                 rectify_avg=False,
                 bn_type=None,
                 dropblock_prob=0.0,
                 last_gamma=False):
        super(Bottleneck, self).__init__()
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes,
                               group_width,
                               kernel_size=1,
                               bias=False)
        self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first

        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1

        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)

        if radix > 1:
            self.conv2 = SplAtConv2d(group_width,
                                     group_width,
                                     kernel_size=3,
                                     stride=stride,
                                     padding=dilation,
                                     dilation=dilation,
                                     groups=cardinality,
                                     bias=False,
                                     radix=radix,
                                     rectify=rectified_conv,
                                     rectify_avg=rectify_avg,
                                     bn_type=bn_type,
                                     dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(group_width,
                                  group_width,
                                  kernel_size=3,
                                  stride=stride,
                                  padding=dilation,
                                  dilation=dilation,
                                  groups=cardinality,
                                  bias=False,
                                  average_mode=rectify_avg)
            self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(group_width)
        else:
            self.conv2 = nn.Conv2d(group_width,
                                   group_width,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=dilation,
                                   dilation=dilation,
                                   groups=cardinality,
                                   bias=False)
            self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(group_width)

        self.conv3 = nn.Conv2d(group_width,
                               planes * 4,
                               kernel_size=1,
                               bias=False)
        self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * 4)

        if last_gamma:
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=False)
        self.relu_in = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
Example no. 25
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 deep_base=False,
                 bn_type=None):
        super(ResNet, self).__init__()
        self.inplanes = 128 if deep_base else 64
        if deep_base:
            self.resinit = nn.Sequential(
                OrderedDict([
                    ('conv1',
                     nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)),
                    ('bn1', ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
                    ('relu1', nn.ReLU(inplace=False)),
                    ('conv2',
                     nn.Conv2d(64,
                               64,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn2', ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
                    ('relu2', nn.ReLU(inplace=False)),
                    ('conv3',
                     nn.Conv2d(64,
                               128,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)),
                    ('bn3',
                     ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)),
                    ('relu3', nn.ReLU(inplace=False))
                ]))
        else:
            self.resinit = nn.Sequential(
                OrderedDict([
                    ('conv1',
                     nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)),
                    ('bn1',
                     ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)),
                    ('relu1', nn.ReLU(inplace=False))
                ]))

        self.maxpool = nn.MaxPool2d(kernel_size=3,
                                    stride=2,
                                    padding=1,
                                    ceil_mode=True)  # change.

        self.layer1 = self._make_layer(block, 64, layers[0], bn_type=bn_type)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       bn_type=bn_type)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       bn_type=bn_type)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       bn_type=bn_type)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(
                    m, ModuleHelper.BatchNorm2d(bn_type=bn_type,
                                                ret_cls=True)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example no. 26
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 with_dcn=False,
                 num_deformable_groups=1,
                 dcn_offset_lr_mult=0.1,
                 use_regular_conv_on_stride=False,
                 use_modulated_dcn=False,
                 bn_type=None):
        """Bottleneck block.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        conv1_stride = 1
        conv2_stride = stride

        self.conv1 = nn.Conv2d(inplanes,
                               planes,
                               kernel_size=1,
                               stride=conv1_stride,
                               bias=False)

        self.with_dcn = with_dcn
        self.use_modulated_dcn = use_modulated_dcn
        if use_regular_conv_on_stride and stride > 1:
            self.with_dcn = False
        if self.with_dcn:
            print("--->> use {}dcn in block where c_in={} and c_out={}".format(
                'modulated ' if self.use_modulated_dcn else '', planes,
                inplanes))
            if use_modulated_dcn:
                self.conv_offset_mask = nn.Conv2d(planes,
                                                  num_deformable_groups * 27,
                                                  kernel_size=3,
                                                  stride=conv2_stride,
                                                  padding=dilation,
                                                  dilation=dilation)
                self.conv_offset_mask.lr_mult = dcn_offset_lr_mult
                self.conv_offset_mask.zero_init = True

                self.conv2 = ModulatedDeformConv(
                    planes,
                    planes,
                    3,
                    stride=conv2_stride,
                    padding=dilation,
                    dilation=dilation,
                    deformable_groups=num_deformable_groups,
                    no_bias=True)
            else:
                self.conv2_offset = nn.Conv2d(planes,
                                              num_deformable_groups * 18,
                                              kernel_size=3,
                                              stride=conv2_stride,
                                              padding=dilation,
                                              dilation=dilation)
                self.conv2_offset.lr_mult = dcn_offset_lr_mult
                self.conv2_offset.zero_init = True

                self.conv2 = DeformConv(
                    planes,
                    planes, (3, 3),
                    stride=conv2_stride,
                    padding=dilation,
                    dilation=dilation,
                    num_deformable_groups=num_deformable_groups)
        else:
            self.conv2 = nn.Conv2d(planes,
                                   planes,
                                   kernel_size=3,
                                   stride=conv2_stride,
                                   padding=dilation,
                                   dilation=dilation,
                                   bias=False)

        self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
        self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
        self.conv3 = nn.Conv2d(planes,
                               planes * self.expansion,
                               kernel_size=1,
                               bias=False)
        self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes *
                                                             self.expansion)
        self.relu = nn.ReLU(inplace=False)
        self.relu_in = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp
    def __init__(self, cfg, bn_type, **kwargs):
        super(HighResolutionNext, self).__init__()
        # stem net
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               bias=False)
        self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)
        self.relu = nn.ReLU(inplace=relu_inplace)

        self.stage1_cfg = cfg['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition0 = self._make_transition_layer([64],
                                                       num_channels,
                                                       bn_type=bn_type)
        self.stage1, pre_stage_channels = self._make_stage(self.stage1_cfg,
                                                           num_channels,
                                                           bn_type=bn_type)

        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition1 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels,
                                                       bn_type=bn_type)
        self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg,
                                                           num_channels,
                                                           bn_type=bn_type)

        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels,
                                                       bn_type=bn_type)
        self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg,
                                                           num_channels,
                                                           bn_type=bn_type)

        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels,
                                                       bn_type=bn_type)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg,
            num_channels,
            multi_scale_output=True,
            bn_type=bn_type)
Example no. 28
    def __init__(self,
                 block,
                 layers,
                 num_classes=1000,
                 zero_init_residual=False,
                 groups=1,
                 width_per_group=64,
                 replace_stride_with_dilation=None,
                 bn_type=None):
        super(ResNet, self).__init__()

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group

        self.resinit = nn.Sequential(
            OrderedDict([
                ('conv1',
                 nn.Conv2d(3,
                           self.inplanes,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('bn1',
                 ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes)),
                ('relu1', nn.ReLU(inplace=False))
            ]))

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], bn_type=bn_type)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0],
                                       bn_type=bn_type)
        self.layer3 = self._make_layer(block,
                                       256,
                                       layers[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1],
                                       bn_type=bn_type)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2],
                                       bn_type=bn_type)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
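A minimal usage sketch for this constructor, assuming the Bottleneck class from this listing, the standard ResNet-50 layer layout [3, 4, 6, 3], and that ModuleHelper.BatchNorm2d(bn_type=None) falls back to plain nn.BatchNorm2d (the fallback behavior is not shown here):

# Hypothetical instantiation: ResNet-50 topology with the constructor above.
model = ResNet(Bottleneck, [3, 4, 6, 3],
               num_classes=1000,
               zero_init_residual=True,
               bn_type=None)  # assumed to select plain nn.BatchNorm2d

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e6:.1f}M parameters")  # roughly 25.6M for this layout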
Example No. 29
    def __init__(self, configer):
        super(DenseASPP, self).__init__()
        self.configer = configer
        dropout0 = 0.1
        dropout1 = 0.1

        self.backbone = BackboneSelector(configer).get_backbone()

        num_features = self.backbone.get_num_features()

        self.trans = _Transition(num_input_features=num_features,
                                 num_output_features=num_features // 2,
                                 bn_type=self.configer.get(
                                     'network', 'bn_type'))

        # Halve the running channel count to match the transition's output.
        num_features = num_features // 2

        self.ASPP_3 = _DenseAsppBlock(input_num=num_features,
                                      num1=256,
                                      num2=64,
                                      dilation_rate=3,
                                      drop_out=dropout0,
                                      bn_type=self.configer.get(
                                          'network', 'bn_type'))

        self.ASPP_6 = _DenseAsppBlock(input_num=num_features + 64 * 1,
                                      num1=256,
                                      num2=64,
                                      dilation_rate=6,
                                      drop_out=dropout0,
                                      bn_type=self.configer.get(
                                          'network', 'bn_type'))

        self.ASPP_12 = _DenseAsppBlock(input_num=num_features + 64 * 2,
                                       num1=256,
                                       num2=64,
                                       dilation_rate=12,
                                       drop_out=dropout0,
                                       bn_type=self.configer.get(
                                           'network', 'bn_type'))

        self.ASPP_18 = _DenseAsppBlock(input_num=num_features + 64 * 3,
                                       num1=256,
                                       num2=64,
                                       dilation_rate=18,
                                       drop_out=dropout0,
                                       bn_type=self.configer.get(
                                           'network', 'bn_type'))

        self.ASPP_24 = _DenseAsppBlock(input_num=num_features + 64 * 4,
                                       num1=256,
                                       num2=64,
                                       dilation_rate=24,
                                       drop_out=dropout0,
                                       bn_type=self.configer.get(
                                           'network', 'bn_type'))

        num_features = num_features + 5 * 64

        self.classification = nn.Sequential(
            nn.Dropout2d(p=dropout1),
            nn.Conv2d(in_channels=num_features,
                      out_channels=self.configer.get('network',
                                                     'out_channels'),
                      kernel_size=1,
                      padding=0))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight.data)

            elif isinstance(
                    m,
                    ModuleHelper.BatchNorm2d(
                        bn_type=self.configer.get('network', 'bn_type'))):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
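The growing input_num values (base, +64, +128, ...) imply that each ASPP branch consumes the transition output concatenated with all previous branch outputs, DenseNet-style. A hedged sketch of that forward pass, written as a free function since the actual forward method is not shown in this listing; the backbone/transition chain and the concatenation order are assumptions:

import torch

def dense_aspp_forward(model, x):
    # Sketch only: assumed feature flow for the DenseASPP module above.
    feature = model.trans(model.backbone(x))          # num_features channels (assumed chain)

    aspp3 = model.ASPP_3(feature)
    feature = torch.cat([aspp3, feature], dim=1)      # num_features + 64

    aspp6 = model.ASPP_6(feature)
    feature = torch.cat([aspp6, feature], dim=1)      # num_features + 64 * 2

    aspp12 = model.ASPP_12(feature)
    feature = torch.cat([aspp12, feature], dim=1)     # num_features + 64 * 3

    aspp18 = model.ASPP_18(feature)
    feature = torch.cat([aspp18, feature], dim=1)     # num_features + 64 * 4

    aspp24 = model.ASPP_24(feature)
    feature = torch.cat([aspp24, feature], dim=1)     # num_features + 64 * 5,
                                                      # matching the classifier's in_channels

    return model.classification(feature)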
    def __init__(self, cfg, bn_type, bn_momentum, **kwargs):
        self.inplanes = 64
        super(HighResolutionNet, self).__init__()

        if os.environ.get('full_res_stem'):
            Log.info("using full-resolution stem with stride=1")
            stem_stride = 1
            self.conv1 = nn.Conv2d(3,
                                   64,
                                   kernel_size=3,
                                   stride=stem_stride,
                                   padding=1,
                                   bias=False)
            self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                64, momentum=bn_momentum)
            self.relu = nn.ReLU(inplace=False)
            self.layer1 = self._make_layer(Bottleneck,
                                           64,
                                           64,
                                           4,
                                           bn_type=bn_type,
                                           bn_momentum=bn_momentum)
        else:
            stem_stride = 2
            self.conv1 = nn.Conv2d(3,
                                   64,
                                   kernel_size=3,
                                   stride=stem_stride,
                                   padding=1,
                                   bias=False)
            self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                64, momentum=bn_momentum)
            self.conv2 = nn.Conv2d(64,
                                   64,
                                   kernel_size=3,
                                   stride=stem_stride,
                                   padding=1,
                                   bias=False)
            self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(
                64, momentum=bn_momentum)
            self.relu = nn.ReLU(inplace=False)
            self.layer1 = self._make_layer(Bottleneck,
                                           64,
                                           64,
                                           4,
                                           bn_type=bn_type,
                                           bn_momentum=bn_momentum)

        self.stage2_cfg = cfg['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]

        self.transition1 = self._make_transition_layer([256],
                                                       num_channels,
                                                       bn_type=bn_type,
                                                       bn_momentum=bn_momentum)

        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg,
            num_channels,
            bn_type=bn_type,
            bn_momentum=bn_momentum)

        self.stage3_cfg = cfg['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels,
                                                       bn_type=bn_type,
                                                       bn_momentum=bn_momentum)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg,
            num_channels,
            bn_type=bn_type,
            bn_momentum=bn_momentum)

        self.stage4_cfg = cfg['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels,
                                                       bn_type=bn_type,
                                                       bn_momentum=bn_momentum)

        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg,
            num_channels,
            multi_scale_output=True,
            bn_type=bn_type,
            bn_momentum=bn_momentum,
            last=True)

        if os.environ.get('keep_imagenet_head'):
            self.incre_modules, self.downsamp_modules, \
                self.final_layer = self._make_head(pre_stage_channels, bn_type=bn_type, bn_momentum=bn_momentum)
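The constructor reads per-stage settings from a nested cfg dict. A hypothetical minimal configuration follows; only the keys read directly in __init__ (NUM_CHANNELS, BLOCK) are certain from this listing, while the remaining keys, presumably consumed by _make_stage, follow the usual HRNet-W18 convention and are an assumption here:

# Hypothetical HRNet-W18 stage config for the constructor above.
hrnet_w18_cfg = {
    'STAGE2': {'NUM_MODULES': 1, 'NUM_BRANCHES': 2, 'BLOCK': 'BASIC',
               'NUM_BLOCKS': [4, 4], 'NUM_CHANNELS': [18, 36],
               'FUSE_METHOD': 'SUM'},
    'STAGE3': {'NUM_MODULES': 4, 'NUM_BRANCHES': 3, 'BLOCK': 'BASIC',
               'NUM_BLOCKS': [4, 4, 4], 'NUM_CHANNELS': [18, 36, 72],
               'FUSE_METHOD': 'SUM'},
    'STAGE4': {'NUM_MODULES': 3, 'NUM_BRANCHES': 4, 'BLOCK': 'BASIC',
               'NUM_BLOCKS': [4, 4, 4, 4], 'NUM_CHANNELS': [18, 36, 72, 144],
               'FUSE_METHOD': 'SUM'},
}

model = HighResolutionNet(hrnet_w18_cfg, bn_type=None, bn_momentum=0.1)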