Example #1
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels[j],
                                             in_channels[i],
                                             kernel_size=1,
                                             stride=1,
                                             padding=0,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[i],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[j],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
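
A minimal sketch of how these fuse layers are typically consumed in an HRNet-style forward pass (hypothetical helper; assumes self.fuse_layers holds the result of _make_fuse_layers above, x is the list of per-branch feature maps, and the module defines self.relu):

    def _fuse(self, x):
        out = []
        for i, fuse_layer in enumerate(self.fuse_layers):
            y = x[i]  # the j == i slot is None: the branch passes through
            for j, layer in enumerate(fuse_layer):
                if layer is not None:
                    # j > i: 1x1 conv + upsample; j < i: strided conv chain
                    y = y + layer(x[j])
            out.append(self.relu(y))
        return out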
Example #2
    def _init_branch_layers(self, planes, idx=0):
        wh_layers, hm_layers = [], []
        inp = planes
        if self.bn_before_head:
            wh_layers.append(nn.BatchNorm2d(inp))
            hm_layers.append(nn.BatchNorm2d(inp))
        for i in range(self.wh_head_conv_num[idx]):
            wh_layers.append(
                ConvModule(inp,
                           self.wh_head_channels[idx],
                           3,
                           padding=1,
                           conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels[idx]
        if self.head_conv_cfg:
            wh_layers.append(
                build_conv_layer(self.head_conv_cfg,
                                 self.wh_head_channels[idx],
                                 4,
                                 kernel_size=3,
                                 padding=1))
        else:
            wh_layers.append(
                nn.Conv2d(self.wh_head_channels[idx], 4, 3, padding=1))

        inp = planes
        for i in range(self.hm_head_conv_num[idx]):
            hm_layers.append(
                ConvModule(inp,
                           self.hm_head_channels[idx],
                           3,
                           padding=1,
                           conv_cfg=self.conv_cfg))
            inp = self.hm_head_channels[idx]
        if self.head_conv_cfg:
            hm_layers.append(
                build_conv_layer(self.head_conv_cfg,
                                 self.hm_head_channels[idx],
                                 self.num_fg,
                                 kernel_size=3,
                                 padding=1))
        else:
            hm_layers.append(
                nn.Conv2d(self.hm_head_channels[idx],
                          self.num_fg,
                          3,
                          padding=1))

        wh_layers = nn.Sequential(*wh_layers)
        hm_layers = nn.Sequential(*hm_layers)
        return wh_layers, hm_layers
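
A hedged usage sketch (hypothetical driver code; assumes torch is imported; the wh head predicts a 4-channel box map and the hm head a self.num_fg-channel heatmap):

    wh_head, hm_head = self._init_branch_layers(64, idx=0)
    feat = torch.randn(2, 64, 128, 128)
    wh_pred = wh_head(feat)   # -> (2, 4, 128, 128)
    hm_pred = hm_head(feat)   # -> (2, num_fg, 128, 128)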
Example #3
def make_connection(inplanes,
                    planes,
                    conv_cfg=None,
                    norm_cfg=dict(type='BN')):
    """Make connection layer.

    :param inplanes: input channels (the current stage's output channels)
    :type inplanes: int

    :param planes: output channels (the next stage's input channels)
    :type planes: int

    :param conv_cfg: conv config
    :type conv_cfg: dict

    :param norm_cfg: norm config
    :type norm_cfg: dict

    :return: layer
    """
    layer = nn.Sequential(
        build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=1,
            bias=False),
        build_norm_layer(norm_cfg, planes)[1],
    )
    return layer
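
A hedged usage sketch of make_connection (assumes torch is importable alongside the layer builders above):

import torch

conn = make_connection(256, 512)   # 1x1 conv + BN, no activation
feat = torch.randn(1, 256, 32, 32)
out = conn(feat)                   # -> torch.Size([1, 512, 32, 32])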
Example #4
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 inplanes,
                                 planes * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(inplanes,
                      planes,
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
Example #5
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 conv_cfg,
                 down=False):
        super(ShortcutConnection, self).__init__()
        layers = []
        for i, kernel_size in enumerate(kernel_sizes):
            inc = in_channels if i == 0 else out_channels
            stride = 1 if not down or i != 0 else 2
            padding = (kernel_size - 1) // 2
            if conv_cfg:
                layers.append(
                    build_conv_layer(conv_cfg,
                                     inc,
                                     out_channels,
                                     kernel_size,
                                     stride=stride,
                                     padding=padding))
            else:
                layers.append(
                    nn.Conv2d(inc,
                              out_channels,
                              kernel_size,
                              stride=stride,
                              padding=padding))
            if i < len(kernel_sizes) - 1:
                layers.append(nn.ReLU(inplace=True))

        self.layers = nn.Sequential(*layers)
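
A hedged usage sketch: with down=True the first conv gets stride 2; ReLUs sit between convs but not after the last one. This assumes the class's forward simply applies self.layers, so the sketch calls the Sequential directly:

import torch

sc = ShortcutConnection(64, 128, kernel_sizes=[3, 3], conv_cfg=None, down=True)
y = sc.layers(torch.randn(1, 64, 64, 64))  # -> torch.Size([1, 128, 32, 32])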
Example #6
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   groups=1,
                   base_width=4,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              groups=groups,
              base_width=base_width,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=dilation,
                  groups=groups,
                  base_width=base_width,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb))

    return nn.Sequential(*layers)
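
A hedged usage sketch, assuming a Bottleneck block class with expansion == 4 (as in the ResNeXt examples below):

res_layer = make_res_layer(Bottleneck, 256, 128, blocks=4, stride=2,
                           groups=32, base_width=4)
# 4 blocks: the first carries the stride-2 conv plus a 1x1 downsample
# projecting 256 -> 512 channels; the remaining 3 run at stride 1.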
Example #7
def make_multigrid(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[]):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=blocks[0] * dilation,
              downsample=downsample,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb,
              gen_attention=gen_attention if
              (0 in gen_attention_blocks) else None))
    inplanes = planes * block.expansion
    for i in range(1, len(blocks)):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=blocks[i] * dilation,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb,
                  gen_attention=gen_attention if
                  (i in gen_attention_blocks) else None))

    return nn.Sequential(*layers)
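
Unlike make_res_layer above, the blocks argument here is a sequence of per-block dilation multipliers (the DeepLab-style multi-grid trick), and len(blocks) sets the depth. A hedged sketch:

multigrid = make_multigrid(Bottleneck, 1024, 512, blocks=(1, 2, 4),
                           stride=1, dilation=2)
# 3 blocks with effective dilations 2, 4 and 8.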
Example #8
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None,
                 groups=1,
                 base_width=4):
        """Initialize."""
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."
        if groups != 1:
            raise ValueError('BasicBlock only supports groups=1')

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Example #9
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      planes,
                                      3,
                                      stride=stride,
                                      padding=dilation,
                                      dilation=dilation,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Example #10
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()

        self.norm1_name, norm1 = build_norm_layer(norm_cfg,
                                                  int(planes / 2),
                                                  postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      int(planes / 2),
                                      1,
                                      stride=stride,
                                      padding=0,
                                      dilation=dilation,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        self.relu1 = nn.LeakyReLU(0.1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      int(planes / 2),
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu2 = nn.LeakyReLU(0.1)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Example #11
    def build_upsample(self,
                       inplanes,
                       planes,
                       norm_cfg=None,
                       no_upsample=False):
        if self.upsample_vanilla_conv:
            if isinstance(self.upsample_vanilla_conv, int) and \
                    not isinstance(self.upsample_vanilla_conv, bool):
                # an int is an effective kernel size: matching padding and
                # dilation keep the 3x3 conv's output size unchanged
                # (bool is a subclass of int, so True must not land here)
                padding = int((self.upsample_vanilla_conv - 1) / 2)
                dila = padding
                mdcn = nn.Conv2d(inplanes,
                                 planes,
                                 3,
                                 stride=1,
                                 padding=padding,
                                 dilation=dila)
            else:
                mdcn = nn.Conv2d(inplanes, planes, 3, stride=1, padding=1)
        elif self.upsample_multiscale_conv:
            mdcn = build_conv_layer(dict(type='MultiScaleConv'), inplanes,
                                    planes)
        elif self.use_trident:
            mdcn = build_conv_layer(dict(type='TriConv'), inplanes, planes)
        elif self.up_conv_cfg:
            mdcn = build_conv_layer(self.up_conv_cfg, inplanes, planes)
        else:
            mdcn = ModulatedDeformConvPack(inplanes,
                                           planes,
                                           3,
                                           offset_mean=self.dcn_mean,
                                           stride=1,
                                           padding=1,
                                           dilation=1,
                                           deformable_groups=1)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        if not no_upsample:
            up = nn.UpsamplingBilinear2d(scale_factor=2)
            layers.append(up)

        return nn.Sequential(*layers)
Example #12
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             num_channels_pre_layer[i],
                                             num_channels_cur_layer[i],
                                             kernel_size=3,
                                             stride=1,
                                             padding=1,
                                             bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels,
                                             out_channels,
                                             kernel_size=3,
                                             stride=2,
                                             padding=1,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)
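
A minimal sketch of how such transition layers are typically consumed between HRNet stages (hypothetical driver code; assumes self.transition1 was built by the method above and x is the single-branch output of stage 1):

    x_list = []
    for i, transition in enumerate(self.transition1):
        if transition is not None:
            x_list.append(transition(x))  # channel change or new branch
        else:
            x_list.append(x)              # channels already match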
Example #13
    def _make_stem_layer(self):
        self.conv1 = build_conv_layer(self.conv_cfg,
                                      3,
                                      64,
                                      kernel_size=7,
                                      stride=2,
                                      padding=3,
                                      bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Example #14
    def _make_stem_layer(self):
        self.conv1 = build_conv_layer(self.conv_cfg,
                                      3,
                                      self.inplanes,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1,
                                      bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                                  self.inplanes,
                                                  postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu1 = nn.LeakyReLU(0.1)
Example #15
    def _make_conv_layer(self, out_channel, conv_num, use_exp_conv=False):
        head_convs = []
        for i in range(conv_num):
            inp = self.planes[-1] if i == 0 else self.head_conv
            head_convs.append(ConvModule(inp, self.head_conv, 3, padding=1))

        inp = self.planes[-1] if conv_num <= 0 else self.head_conv
        head_convs.append(nn.Conv2d(inp, out_channel, 1))
        if use_exp_conv:
            head_convs.append(
                build_conv_layer(dict(type='ExpConv'),
                                 out_channel,
                                 out_channel,
                                 neg_x=True))
        return nn.Sequential(*head_convs)
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 no_upsample=False,
                 vallina=False,
                 offset_mean=False):
        if vallina:
            # bool is a subclass of int in Python, so guard against
            # vallina=True falling into the int (kernel-size) branch below
            if isinstance(vallina, int) and not isinstance(vallina, bool):
                # an int is an effective kernel size: matching padding and
                # dilation keep the 3x3 conv's output size unchanged
                padding = int((vallina - 1) / 2)
                dila = padding
                mdcn = nn.Conv2d(in_channels,
                                 out_channels,
                                 3,
                                 padding=padding,
                                 dilation=dila)
            else:
                mdcn = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        elif conv_cfg:
            mdcn = build_conv_layer(conv_cfg, in_channels, out_channels)
        else:
            mdcn = ModulatedDeformConvPack(in_channels,
                                           out_channels,
                                           3,
                                           offset_mean=offset_mean,
                                           stride=1,
                                           padding=1,
                                           dilation=1,
                                           deformable_groups=1)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, out_channels)[1])
        layers.append(nn.ReLU(inplace=True))
        if not no_upsample:
            layers.append(nn.UpsamplingBilinear2d(scale_factor=2))
        super(UpsamplingLayers, self).__init__(*layers)
Example #17
    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 mask_roi_extractor,
                 mask_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        super(CDMaskRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

        conv_layer = []
        for i in range(len(self.backbone.out_indices)):
            layer = nn.Sequential(
                build_conv_layer(None,
                                 1,
                                 256 * 2**i,
                                 kernel_size=1,
                                 stride=1,
                                 bias=False),
                build_norm_layer(dict(type='BN', requires_grad=True),
                                 256 * 2**i)[1],
                nn.ReLU(inplace=True))
            conv_layer.append(layer)
Example #18
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 self.in_channels[branch_index],
                                 num_channels[branch_index] * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(self.in_channels[branch_index],
                  num_channels[branch_index],
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg))
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.in_channels[branch_index],
                      num_channels[branch_index],
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
Example #19
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)
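
A hedged sketch of the forward pass this constructor implies (mmdet-style blocks expose norm1/norm2/norm3 as properties resolving the dynamically named modules; the real forward also wires in gen_attention and optional checkpointing via with_cp):

    def forward(self, x):
        identity = x
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        out = self.norm3(self.conv3(out))
        if self.with_gcb:
            out = self.context_block(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)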
Example #20
    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        """Bottleneck block for ResNeXt.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups

        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                                  width,
                                                  postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg,
                                                  width,
                                                  postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg,
                                                  self.planes * self.expansion,
                                                  postfix=3)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      self.inplanes,
                                      width,
                                      kernel_size=1,
                                      stride=self.conv1_stride,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(self.conv_cfg,
                                          width,
                                          width,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation,
                                          groups=groups,
                                          bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn,
                                          width,
                                          width,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation,
                                          groups=groups,
                                          bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg,
                                      width,
                                      self.planes * self.expansion,
                                      kernel_size=1,
                                      bias=False)
        self.add_module(self.norm3_name, norm3)
Example #21
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False):
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      in_channels,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(self.conv_cfg,
                                      64,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)
Example #22
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            deformable_groups = dcn.get('deformable_groups', 1)
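            # Offset channels per deformable group: a 3x3 kernel needs
            # 2 * 3 * 3 = 18 x/y offsets; the modulated variant (DCNv2)
            # adds a 3 * 3 mask on top, giving 27.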
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                planes,  # conv2's input is conv1's output (planes channels)
                deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation)
            self.conv2 = conv_op(
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                deformable_groups=deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
Example #23
    def init_conv_layers(self):
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                                  self.planes,
                                                  postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg,
                                                  self.planes,
                                                  postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg,
                                                  self.planes * self.expansion,
                                                  postfix=3)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      self.inplanes,
                                      self.planes,
                                      kernel_size=1,
                                      stride=self.conv1_stride,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = self.dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(self.conv_cfg,
                                          self.planes,
                                          self.planes,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation,
                                          bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            deformable_groups = self.dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            # conv2's input is conv1's output (self.planes channels)
            self.conv2_offset = nn.Conv2d(self.planes,
                                          deformable_groups * offset_channels,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation)
            self.conv2 = conv_op(self.planes,
                                 self.planes,
                                 kernel_size=3,
                                 stride=self.conv2_stride,
                                 padding=self.dilation,
                                 dilation=self.dilation,
                                 deformable_groups=deformable_groups,
                                 bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg,
                                      self.planes,
                                      self.planes * self.expansion,
                                      kernel_size=1,
                                      bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
Example #24
def make_res_layer(block,
                   inplanes,
                   planes,
                   arch,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[],
                   groups=1,
                   base_width=4):
    """Make resnet layer.

    :param block: block function

    :param inplanes: input feature map channel num
    :type inplanes: int

    :param planes: output feature map channel num
    :type planes: int

    :param arch: model arch
    :type arch: str

    :param stride: stride
    :type stride: int

    :param dilation: dilation
    :type dilation: int

    :param style: style
    :type style: str

    :param with_cp: with cp
    :type with_cp: bool

    :param conv_cfg: conv config
    :type conv_cfg: dict

    :param norm_cfg: norm config
    :type norm_cfg: dict

    :param dcn: deformable conv network

    :param gcb: gcb

    :param gen_attention: gen attention

    :param gen_attention_blocks: gen attention block

    :param groups: groups
    :type groups: int

    :param base_width: base width
    :type base_width: int

    :return: layer
    """
    layers = []
    for i, layer_type in enumerate(arch):
        downsample = None
        stride = stride if i == 0 else 1
        if layer_type == 2:
            planes *= 2
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1])
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                dilation=dilation,
                downsample=downsample,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                groups=groups,
                base_width=base_width,
                gen_attention=gen_attention if
                (i in gen_attention_blocks) else None))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)
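
A hedged usage sketch: arch is a per-block code list rather than a count, and an entry of 2 doubles planes before that block is built (the channel mismatch then also triggers a 1x1 downsample):

layer = make_res_layer(Bottleneck, 64, 64, arch=[1, 2, 1], stride=2)
# block 0: 64 -> 256 channels at stride 2; block 1: planes doubles to 128,
# so 256 -> 512; block 2: keeps 512 at stride 1.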
Example #25
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None,
                 groups=1,
                 base_width=4):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                groups=groups,
                bias=False)
        else:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            self.deformable_groups = dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                width,  # conv2's input is conv1's output (width channels)
                self.deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation)
            self.conv2 = conv_op(
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                groups=groups,
                deformable_groups=self.deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            width,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(
                inplanes=gcb_inplanes,
                **gcb
            )

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)
Example #26
    def _make_deconv_layer(self,
                           inplanes,
                           num_layers,
                           num_filters,
                           num_kernels,
                           norm_cfg=None):
        """Make stacked upsampling (deconv) layers.

        Args:
            inplanes: in-channel num of the first layer.
            num_layers: deconv layer num.
            num_filters: out channels of the deconv layers, one per layer.
            num_kernels: deconv kernel sizes, one per layer.
            norm_cfg: optional norm config dict.

        Returns:
            stacked deconv layers.
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_layers differs from len(num_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_layers differs from len(num_kernels)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = self._get_deconv_cfg(
                num_kernels[i])
            planes = num_filters[i]
            inplanes = inplanes if i == 0 else num_filters[i - 1]

            if self.use_trident:
                mdcn = build_conv_layer(dict(type='TriConv'), inplanes, planes)
            else:
                mdcn = ModulatedDeformConvPack(inplanes,
                                               planes,
                                               3,
                                               stride=1,
                                               padding=1,
                                               dilation=1,
                                               deformable_groups=1)
            if self.use_upsample_conv:
                up = nn.Sequential(nn.UpsamplingBilinear2d(scale_factor=2),
                                   nn.Conv2d(planes, planes, 3, padding=1))
            else:
                up = nn.ConvTranspose2d(in_channels=planes,
                                        out_channels=planes,
                                        kernel_size=kernel,
                                        stride=2,
                                        padding=padding,
                                        output_padding=output_padding,
                                        bias=self.deconv_with_bias)
                self.fill_up_weights(up)

            layers.append(mdcn)
            if norm_cfg:
                layers.append(build_norm_layer(norm_cfg, planes)[1])
            layers.append(nn.ReLU(inplace=True))

            layers.append(up)
            if norm_cfg:
                layers.append(build_norm_layer(norm_cfg, planes)[1])
            layers.append(nn.ReLU(inplace=True))

        return nn.Sequential(*layers)
Example #27
    def _init_branch_layers(self, planes, upsample_inplane, shortcut_inplanes):
        upsample_layers = nn.ModuleList([
            UpsamplingLayers(
                upsample_inplane, planes[0], norm_cfg=self.norm_cfg),
            UpsamplingLayers(
                planes[0], planes[1], norm_cfg=self.norm_cfg),
        ])

        shortcut_layers = nn.ModuleList()
        for (inp, outp, layer_num) in zip(shortcut_inplanes,
                                          planes, self.shortcut_cfg):
            assert layer_num > 0, "Shortcut connection must be included."
            shortcut_layers.append(
                ShortcutConnection(inp, outp, [3] * layer_num, self.shortcut_conv_cfg))

        wh_layers, hm_layers = [], []
        inp = planes[-1]
        for i in range(self.wh_head_conv_num):
            wh_layers.append(
                ConvModule(
                    inp,
                    self.wh_head_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels
        if self.head_conv_cfg:
            wh_layers.append(
                build_conv_layer(
                    self.head_conv_cfg,
                    self.wh_head_channels,
                    4,
                    kernel_size=3,
                    padding=1
                )
            )
        else:
            wh_layers.append(nn.Conv2d(self.wh_head_channels, 4, 3, padding=1))

        inp = planes[-1]
        for i in range(self.hm_head_conv_num):
            hm_layers.append(
                ConvModule(
                    inp,
                    self.hm_head_channels,
                    3,
                    padding=1,
                    conv_cfg=self.conv_cfg))
            inp = self.hm_head_channels
        if self.head_conv_cfg:
            hm_layers.append(
                build_conv_layer(
                    self.head_conv_cfg,
                    self.hm_head_channels,
                    self.num_fg,
                    kernel_size=3,
                    padding=1
                )
            )
        elif self.depthwise_hm:
            hm_layers.append(DepthwiseHead(self.hm_head_channels, self.num_fg, 3,
                                           conv_relu=self.relu_before_depthwise,
                                           depth_kernel_sizes=self.depth_kernel_sizes,
                                           depth_group=self.depth_group,
                                           use_deform=self.depth_deform))
        else:
            hm_layers.append(nn.Conv2d(self.hm_head_channels, self.num_fg, 3, padding=1))

        wh_layers = nn.Sequential(*wh_layers)
        hm_layers = nn.Sequential(*hm_layers)
        return upsample_layers, shortcut_layers, wh_layers, hm_layers
Example #28
    def _init_branch_layers(self, planes):
        wh_layers, wh2_layers, hm_layers = [], [], []
        inp = planes
        for i in range(self.wh_head_conv_num[0]):
            wh_layers.append(
                ConvModule(inp,
                           self.wh_head_channels[0],
                           3,
                           padding=1,
                           conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels[0]
        if self.head_conv_cfg:
            wh_layers.append(
                build_conv_layer(self.head_conv_cfg,
                                 self.wh_head_channels[0],
                                 4,
                                 kernel_size=3,
                                 padding=1))
        else:
            wh_layers.append(
                nn.Conv2d(self.wh_head_channels[0], 4, 3, padding=1))

        inp = planes
        for i in range(self.wh_head_conv_num[1]):
            wh2_layers.append(
                ConvModule(inp,
                           self.wh_head_channels[1],
                           3,
                           padding=1,
                           conv_cfg=self.conv_cfg))
            inp = self.wh_head_channels[1]
        if self.head_conv_cfg:
            wh2_layers.append(
                build_conv_layer(self.head_conv_cfg,
                                 self.wh_head_channels[1],
                                 4,
                                 kernel_size=3,
                                 padding=1))
        else:
            wh2_layers.append(
                nn.Conv2d(self.wh_head_channels[1], 4, 3, padding=1))

        inp = planes
        for i in range(self.hm_head_conv_num):
            hm_layers.append(
                ConvModule(inp,
                           self.hm_head_channels,
                           3,
                           padding=1,
                           conv_cfg=self.conv_cfg))
            inp = self.hm_head_channels
        if self.head_conv_cfg:
            hm_layers.append(
                build_conv_layer(self.head_conv_cfg,
                                 self.hm_head_channels,
                                 self.num_fg,
                                 kernel_size=3,
                                 padding=1))
        else:
            hm_layers.append(
                nn.Conv2d(self.hm_head_channels, self.num_fg, 3, padding=1))

        wh_layers = nn.Sequential(*wh_layers)
        wh2_layers = nn.Sequential(*wh2_layers)
        hm_layers = nn.Sequential(*hm_layers)
        return wh_layers, wh2_layers, hm_layers