def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, bias=True,
                 radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None, num_splits=1,
                 dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
                                 groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
                               groups=groups * radix, bias=bias, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = get_norm(norm_layer, channels * radix, num_splits)
        self.relu = ReLU(inplace=True)
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = get_norm(norm_layer, inter_channels, num_splits)
        self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)

        self.rsoftmax = rSoftMax(radix, groups)
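# The forward computation these modules implement is not shown above. The
# following is a minimal, self-contained sketch of the split-attention step
# (cardinality == 1 case; the norm layers, dropblock and the grouped rSoftMax
# reshape used by the real SplAtConv2d are omitted, so details may differ):
import torch
import torch.nn.functional as F

def split_attention_sketch(x, conv, fc1, fc2, radix, channels):
    # x: (N, C_in, H, W); conv produces channels * radix feature maps.
    batch = x.size(0)
    feats = F.relu(conv(x))                                    # (N, channels*radix, H, W)
    splits = torch.split(feats, channels, dim=1)               # radix chunks of (N, channels, H, W)
    gap = F.adaptive_avg_pool2d(sum(splits), 1)                # fuse the splits, then global pool
    atten = fc2(F.relu(fc1(gap)))                              # (N, channels*radix, 1, 1)
    atten = atten.view(batch, radix, channels).softmax(dim=1)  # softmax across the radix axis
    attens = torch.split(atten.view(batch, -1, 1, 1), channels, dim=1)
    return sum(a * s for a, s in zip(attens, splits))          # (N, channels, H, W)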
 def __init__(self,
              inplanes,
              planes,
              bn_norm,
              num_splits,
              with_ibn=False,
              with_se=False,
              stride=1,
              downsample=None,
              reduction=16):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     if with_ibn:
         self.bn1 = IBN(planes, bn_norm, num_splits)
     else:
         self.bn1 = get_norm(bn_norm, planes, num_splits)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = get_norm(bn_norm, planes, num_splits)
     self.conv3 = nn.Conv2d(planes,
                            planes * self.expansion,
                            kernel_size=1,
                            bias=False)
     self.bn3 = get_norm(bn_norm, planes * self.expansion, num_splits)
     self.relu = nn.ReLU(inplace=True)
     if with_se:
         self.se = SELayer(planes * self.expansion, reduction)
     else:
         self.se = nn.Identity()
     self.downsample = downsample
     self.stride = stride
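# The Bottleneck forward pass is not included in this snippet; a hedged sketch of
# the usual wiring for this layer layout (1x1 -> 3x3 -> 1x1, with the optional SE
# block applied before the residual addition) is:
def bottleneck_forward_sketch(block, x):
    identity = x
    out = block.relu(block.bn1(block.conv1(x)))
    out = block.relu(block.bn2(block.conv2(out)))
    out = block.se(block.bn3(block.conv3(out)))
    if block.downsample is not None:
        identity = block.downsample(x)
    return block.relu(out + identity)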
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    bn_norm="BN",
                    num_splits=1,
                    with_ibn=False,
                    with_se=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                get_norm(bn_norm, planes * block.expansion, num_splits),
            )

        layers = []
        if planes == 512:
            with_ibn = False
        layers.append(
            block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                  with_se, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                      with_se))

        return nn.Sequential(*layers)
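# Note (hedged): with the standard Bottleneck (block.expansion == 4), a call such
# as self._make_layer(block, 64, 3) returns a Sequential whose first block maps
# self.inplanes -> 256 channels (attaching the 1x1 downsample built above when the
# stride or channel count changes) and whose remaining blocks map 256 -> 256;
# self.inplanes is updated to 256 as a side effect.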
    def __init__(self, last_stride, bn_norm, num_splits, with_ibn, with_se,
                 with_nl, block, layers, non_layers):
        self.inplanes = 64
        super().__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = get_norm(bn_norm, 64, num_splits)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       last_stride,
                                       bn_norm,
                                       num_splits,
                                       with_se=with_se)

        self.random_init()

        if with_nl:
            self._build_nonlocal(layers, non_layers, bn_norm, num_splits)
        else:
            self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
 @staticmethod
 def _build_pool_reduce(pool_layer, bn_norm, num_splits, input_dim=2048, reduce_dim=256):
     pool_reduce = nn.Sequential(
         pool_layer,
         nn.Conv2d(input_dim, reduce_dim, 1, bias=False),
         get_norm(bn_norm, reduce_dim, num_splits),
         nn.ReLU(True),
     )
     pool_reduce.apply(weights_init_kaiming)
     return pool_reduce
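# Hedged usage sketch (class names and shapes are illustrative only): the returned
# Sequential pools the backbone feature, then a 1x1 conv + norm + ReLU shrinks the
# 2048-dim feature to reduce_dim channels; weights_init_kaiming re-initialises it.
#
#   pool_reduce = _build_pool_reduce(nn.AdaptiveAvgPool2d(1), "BN", num_splits=1)
#   feat = pool_reduce(torch.randn(8, 2048, 16, 8))   # -> (8, 256, 1, 1)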
    def __init__(self,
                 inplanes,
                 planes,
                 bn_norm,
                 num_splits,
                 with_ibn=False,
                 stride=1,
                 downsample=None,
                 radix=1,
                 cardinality=1,
                 bottleneck_width=64,
                 avd=False,
                 avd_first=False,
                 dilation=1,
                 is_first=False,
                 rectified_conv=False,
                 rectify_avg=False,
                 dropblock_prob=0.0,
                 last_gamma=False):
        super(Bottleneck, self).__init__()
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes,
                               group_width,
                               kernel_size=1,
                               bias=False)
        if with_ibn:
            self.bn1 = IBN(group_width, bn_norm, num_splits)
        else:
            self.bn1 = get_norm(bn_norm, group_width, num_splits)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first

        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1

        if radix > 1:
            self.conv2 = SplAtConv2d(group_width,
                                     group_width,
                                     kernel_size=3,
                                     stride=stride,
                                     padding=dilation,
                                     dilation=dilation,
                                     groups=cardinality,
                                     bias=False,
                                     radix=radix,
                                     rectify=rectified_conv,
                                     rectify_avg=rectify_avg,
                                     norm_layer=bn_norm,
                                     num_splits=num_splits,
                                     dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(group_width,
                                  group_width,
                                  kernel_size=3,
                                  stride=stride,
                                  padding=dilation,
                                  dilation=dilation,
                                  groups=cardinality,
                                  bias=False,
                                  average_mode=rectify_avg)
            self.bn2 = get_norm(bn_norm, group_width, num_splits)
        else:
            self.conv2 = nn.Conv2d(group_width,
                                   group_width,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=dilation,
                                   dilation=dilation,
                                   groups=cardinality,
                                   bias=False)
            self.bn2 = get_norm(bn_norm, group_width, num_splits)

        self.conv3 = nn.Conv2d(group_width,
                               planes * 4,
                               kernel_size=1,
                               bias=False)
        self.bn3 = get_norm(bn_norm, planes * 4, num_splits)

        if last_gamma:
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
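# The ResNeSt bottleneck forward pass is not shown; hedged sketch of how the
# pieces above are typically combined (dropblock omitted; SplAtConv2d applies its
# own normalisation and activation, so bn2/relu are only used when radix == 1):
def resnest_bottleneck_forward_sketch(block, x):
    identity = x
    out = block.relu(block.bn1(block.conv1(x)))
    if block.avd and block.avd_first:
        out = block.avd_layer(out)
    out = block.conv2(out)
    if block.radix == 1:
        out = block.relu(block.bn2(out))
    if block.avd and not block.avd_first:
        out = block.avd_layer(out)
    out = block.bn3(block.conv3(out))
    if block.downsample is not None:
        identity = block.downsample(x)
    return block.relu(out + identity)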
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    bn_norm="BN",
                    num_splits=1,
                    with_ibn=False,
                    dilation=1,
                    dropblock_prob=0.0,
                    is_first=True):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                if dilation == 1:
                    down_layers.append(
                        nn.AvgPool2d(kernel_size=stride,
                                     stride=stride,
                                     ceil_mode=True,
                                     count_include_pad=False))
                else:
                    down_layers.append(
                        nn.AvgPool2d(kernel_size=1,
                                     stride=1,
                                     ceil_mode=True,
                                     count_include_pad=False))
                down_layers.append(
                    nn.Conv2d(self.inplanes,
                              planes * block.expansion,
                              kernel_size=1,
                              stride=1,
                              bias=False))
            else:
                down_layers.append(
                    nn.Conv2d(self.inplanes,
                              planes * block.expansion,
                              kernel_size=1,
                              stride=stride,
                              bias=False))
            down_layers.append(
                get_norm(bn_norm, planes * block.expansion, num_splits))
            downsample = nn.Sequential(*down_layers)

        layers = []
        if planes == 512:
            with_ibn = False
        if dilation == 1 or dilation == 2:
            layers.append(
                block(self.inplanes,
                      planes,
                      bn_norm,
                      num_splits,
                      with_ibn,
                      stride,
                      downsample=downsample,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=1,
                      is_first=is_first,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(
                block(self.inplanes,
                      planes,
                      bn_norm,
                      num_splits,
                      with_ibn,
                      stride,
                      downsample=downsample,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=2,
                      is_first=is_first,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      bn_norm,
                      num_splits,
                      with_ibn,
                      radix=self.radix,
                      cardinality=self.cardinality,
                      bottleneck_width=self.bottleneck_width,
                      avd=self.avd,
                      avd_first=self.avd_first,
                      dilation=dilation,
                      rectified_conv=self.rectified_conv,
                      rectify_avg=self.rectify_avg,
                      dropblock_prob=dropblock_prob,
                      last_gamma=self.last_gamma))

        return nn.Sequential(*layers)
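# Note (hedged): the dilation handling above follows the usual dilated-ResNet
# recipe -- when the requested dilation is 4, the first block of the stage runs at
# dilation 2 and the remaining blocks at dilation 4, so the receptive field grows
# while the spatial stride of the stage stays at 1.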
    def __init__(self,
                 last_stride,
                 bn_norm,
                 num_splits,
                 with_ibn,
                 with_nl,
                 block,
                 layers,
                 non_layers,
                 radix=1,
                 groups=1,
                 bottleneck_width=64,
                 dilated=False,
                 dilation=1,
                 deep_stem=False,
                 stem_width=64,
                 avg_down=False,
                 rectified_conv=False,
                 rectify_avg=False,
                 avd=False,
                 avd_first=False,
                 final_drop=0.0,
                 dropblock_prob=0,
                 last_gamma=False):
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        # ResNet-D params
        self.inplanes = stem_width * 2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        # ResNeSt params
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first

        super().__init__()
        self.rectified_conv = rectified_conv
        self.rectify_avg = rectify_avg
        if rectified_conv:
            from rfconv import RFConv2d
            conv_layer = RFConv2d
        else:
            conv_layer = nn.Conv2d
        conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
        if deep_stem:
            self.conv1 = nn.Sequential(
                conv_layer(3,
                           stem_width,
                           kernel_size=3,
                           stride=2,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
                get_norm(bn_norm, stem_width, num_splits),
                nn.ReLU(inplace=True),
                conv_layer(stem_width,
                           stem_width,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
                get_norm(bn_norm, stem_width, num_splits),
                nn.ReLU(inplace=True),
                conv_layer(stem_width,
                           stem_width * 2,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False,
                           **conv_kwargs),
            )
        else:
            self.conv1 = conv_layer(3,
                                    64,
                                    kernel_size=7,
                                    stride=2,
                                    padding=3,
                                    bias=False,
                                    **conv_kwargs)
        self.bn1 = get_norm(bn_norm, self.inplanes, num_splits)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block,
                                       64,
                                       layers[0],
                                       1,
                                       bn_norm,
                                       num_splits,
                                       with_ibn=with_ibn,
                                       is_first=False)
        self.layer2 = self._make_layer(block,
                                       128,
                                       layers[1],
                                       2,
                                       bn_norm,
                                       num_splits,
                                       with_ibn=with_ibn)
        if dilated or dilation == 4:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           1,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dilation=2,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           1,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dilation=4,
                                           dropblock_prob=dropblock_prob)
        elif dilation == 2:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           2,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dilation=1,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           1,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dilation=2,
                                           dropblock_prob=dropblock_prob)
        else:
            self.layer3 = self._make_layer(block,
                                           256,
                                           layers[2],
                                           2,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block,
                                           512,
                                           layers[3],
                                           last_stride,
                                           bn_norm,
                                           num_splits,
                                           with_ibn=with_ibn,
                                           dropblock_prob=dropblock_prob)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
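        # Note: the loop above is the hand-written "fan_out" Kaiming-normal
        # initialisation (std = sqrt(2 / (k_h * k_w * out_channels))) for every
        # conv, with BatchNorm weights set to 1 and biases to 0.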

        if with_nl:
            self._build_nonlocal(layers, non_layers, bn_norm, num_splits)
        else:
            self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
    def __init__(self, cfg):
        super().__init__()
        self._cfg = cfg
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))

        # backbone
        bn_norm = cfg.MODEL.BACKBONE.NORM
        num_splits = cfg.MODEL.BACKBONE.NORM_SPLIT
        with_se = cfg.MODEL.BACKBONE.WITH_SE

        backbone = build_backbone(cfg)
        self.backbone = nn.Sequential(
            backbone.conv1,
            backbone.bn1,
            backbone.relu,
            backbone.maxpool,
            backbone.layer1,
            backbone.layer2,
            backbone.layer3[0]
        )
        res_conv4 = nn.Sequential(*backbone.layer3[1:])
        res_g_conv5 = backbone.layer4

        res_p_conv5 = nn.Sequential(
            Bottleneck(1024, 512, bn_norm, num_splits, False, with_se, downsample=nn.Sequential(
                nn.Conv2d(1024, 2048, 1, bias=False), get_norm(bn_norm, 2048, num_splits))),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se))
        res_p_conv5.load_state_dict(backbone.layer4.state_dict())

        pool_type = cfg.MODEL.HEADS.POOL_LAYER
        if pool_type == 'avgpool':      pool_layer = FastGlobalAvgPool2d()
        elif pool_type == 'maxpool':    pool_layer = nn.AdaptiveMaxPool2d(1)
        elif pool_type == 'gempool':    pool_layer = GeneralizedMeanPoolingP()
        elif pool_type == "avgmaxpool": pool_layer = AdaptiveAvgMaxPool2d()
        elif pool_type == "identity":   pool_layer = nn.Identity()
        else:
            raise KeyError(f"{pool_type} is invalid, please choose from "
                           f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'.")

        # head
        in_feat = cfg.MODEL.HEADS.IN_FEAT
        num_classes = cfg.MODEL.HEADS.NUM_CLASSES
        # branch1
        self.b1 = nn.Sequential(
            copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5)
        )
        self.b1_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)

        self.b1_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        # branch2
        self.b2 = nn.Sequential(
            copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5)
        )
        self.b2_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b2_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        self.b21_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b21_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        self.b22_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b22_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        # branch3
        self.b3 = nn.Sequential(
            copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5)
        )
        self.b3_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b3_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        self.b31_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b31_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        self.b32_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b32_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())

        self.b33_pool = self._build_pool_reduce(pool_layer, bn_norm, num_splits, reduce_dim=in_feat)
        self.b33_head = build_reid_heads(cfg, in_feat, num_classes, nn.Identity())
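        # Hedged summary of the branch layout built above (the forward pass is not
        # shown here): b1 keeps the strided backbone.layer4 and yields one global
        # feature; b2 and b3 reuse the stride-1 copy res_p_conv5, so their larger
        # feature maps can additionally be split into 2 (b21/b22) and 3
        # (b31/b32/b33) horizontal parts, each with its own pool-reduce module and
        # ReID head, following the Multiple Granularity Network (MGN) design.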