Example 1
    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, bias=True,
                 radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None, num_splits=1,
                 dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
                                 groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
                               groups=groups * radix, bias=bias, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = get_norm(norm_layer, channels * radix, num_splits)
        self.relu = ReLU(inplace=True)
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = get_norm(norm_layer, inter_channels, num_splits)
        self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)

        self.rsoftmax = rSoftMax(radix, groups)
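
Every example on this page builds its normalization layers through fastreid's get_norm(bn_norm, channels, ...) helper. As a reading aid, here is a minimal, hypothetical stand-in that captures only the calling convention visible in these snippets; the real helper also covers frozen BN, ghost BN (the num_splits argument), sync BN and a bias_freeze flag:

import torch.nn as nn

def get_norm(norm, out_channels, num_splits=1, **kwargs):
    # Sketch only: map a norm name (or callable) to a module instance.
    if norm is None or norm == "BN":
        return nn.BatchNorm2d(out_channels)
    if norm == "IN":
        return nn.InstanceNorm2d(out_channels, affine=True)
    if callable(norm):
        return norm(out_channels)
    raise ValueError("unknown norm: {}".format(norm))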
Example 2
 def __init__(self,
              inplanes,
              planes,
              bn_norm,
              num_splits,
              with_ibn=False,
              with_se=False,
              stride=1,
              downsample=None,
              reduction=16):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
     if with_ibn:
         self.bn1 = IBN(planes, bn_norm, num_splits)
     else:
         self.bn1 = get_norm(bn_norm, planes, num_splits)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
     self.bn2 = get_norm(bn_norm, planes, num_splits)
     self.conv3 = nn.Conv2d(planes,
                            planes * self.expansion,
                            kernel_size=1,
                            bias=False)
     self.bn3 = get_norm(bn_norm, planes * self.expansion, num_splits)
     self.relu = nn.ReLU(inplace=True)
     if with_se:
         self.se = SELayer(planes * self.expansion, reduction)
     else:
         self.se = nn.Identity()
     self.downsample = downsample
     self.stride = stride
Example 3
    def __init__(self, inplanes, planes, bn_norm, num_splits, with_ibn, baseWidth, cardinality, stride=1,
                 downsample=None):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            baseWidth: base width.
            cardinality: num of convolution groups.
            stride: conv stride. Replaces pooling layer.
        """
        super(Bottleneck, self).__init__()

        D = int(math.floor(planes * (baseWidth / 64)))
        C = cardinality
        self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, stride=1, padding=0, bias=False)
        if with_ibn:
            self.bn1 = IBN(D * C, bn_norm, num_splits)
        else:
            self.bn1 = get_norm(bn_norm, D * C, num_splits)
        self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
        self.bn2 = get_norm(bn_norm, D * C, num_splits)
        self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = get_norm(bn_norm, planes * 4, num_splits)
        self.relu = nn.ReLU(inplace=True)

        self.downsample = downsample
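
The D * C bookkeeping above is easiest to see with concrete numbers. A quick, illustrative check using the ResNeXt-50 32x4d defaults (baseWidth=4, cardinality=32) at the first stage:

import math

baseWidth, cardinality, planes = 4, 32, 64        # illustrative: ResNeXt-50 32x4d, stage 1
D = int(math.floor(planes * (baseWidth / 64)))    # per-group width: 4
print(D * cardinality, planes * 4)                # conv1/conv2 width 128, conv3 output 256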
Example 4
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = get_norm(norm_layer, group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first

        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1

        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)

        if radix >= 1:
            self.conv2 = SplAtConv2d(
                group_width, group_width, kernel_size=3,
                stride=stride, padding=dilation,
                dilation=dilation, groups=cardinality, bias=False,
                radix=radix, rectify=rectified_conv,
                rectify_avg=rectify_avg,
                norm_layer=norm_layer,
                dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False,
                average_mode=rectify_avg)
            self.bn2 = get_norm(norm_layer, group_width)
        else:
            self.conv2 = nn.Conv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False)
            self.bn2 = get_norm(norm_layer, group_width)

        self.conv3 = nn.Conv2d(
            group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = get_norm(norm_layer, planes * 4)

        if last_gamma:
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride
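
A similar sanity check for the group_width computed above, with common ResNeSt-50-style settings (radix only matters inside SplAtConv2d); the values are illustrative:

planes, bottleneck_width, cardinality = 64, 64, 1   # illustrative stage-1 values
group_width = int(planes * (bottleneck_width / 64.)) * cardinality
print(group_width, planes * 4)                      # conv1/conv2 width 64, conv3 output 256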
Example 5
 def construct(self, w_in, w_out, stride, bn_norm):
     # 3x3, BN, ReLU
     self.a = nn.Conv2d(
         w_in, w_out, kernel_size=3, stride=stride, padding=1, bias=False
     )
     self.a_bn = get_norm(bn_norm, w_out)
     self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
     # 3x3, BN, ReLU
     self.b = nn.Conv2d(w_out, w_out, kernel_size=3, stride=1, padding=1, bias=False)
     self.b_bn = get_norm(bn_norm, w_out)
     self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
Example 6
    def __init__(self, bn_norm, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = ksize // 2
        self.pad = pad
        self.inp = inp

        outputs = oup - inp

        branch_main = [
            # pw
            nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
            get_norm(bn_norm, mid_channels),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(mid_channels,
                      mid_channels,
                      ksize,
                      stride,
                      pad,
                      groups=mid_channels,
                      bias=False),
            get_norm(bn_norm, mid_channels),
            # pw-linear
            nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
            get_norm(bn_norm, outputs),
            nn.ReLU(inplace=True),
        ]
        self.branch_main = nn.Sequential(*branch_main)

        if stride == 2:
            branch_proj = [
                # dw
                nn.Conv2d(inp, inp, ksize, stride, pad, groups=inp,
                          bias=False),
                get_norm(bn_norm, inp),
                # pw-linear
                nn.Conv2d(inp, inp, 1, 1, 0, bias=False),
                get_norm(bn_norm, inp),
                nn.ReLU(inplace=True),
            ]
            self.branch_proj = nn.Sequential(*branch_proj)
        else:
            self.branch_proj = None
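
The main branch above deliberately ends with oup - inp channels. The forward pass is not shown in this snippet, but in the ShuffleNet V2 design the block concatenates that branch with an inp-channel shortcut, restoring oup channels; an illustrative shape check:

import torch

inp, oup = 58, 116                                 # illustrative channel counts
shortcut = torch.randn(1, inp, 28, 28)             # projection / passthrough branch
main = torch.randn(1, oup - inp, 28, 28)           # what branch_main would emit
print(torch.cat([shortcut, main], dim=1).shape)    # torch.Size([1, 116, 28, 28])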
Example 7
 def __init__(self, in_channel, depth, bn_norm, stride, with_se=False):
     super(bottleneck_IR, self).__init__()
     if in_channel == depth:
         self.shortcut_layer = nn.MaxPool2d(1, stride)
     else:
         self.shortcut_layer = nn.Sequential(
             nn.Conv2d(in_channel, depth, (1, 1), stride, bias=False),
             get_norm(bn_norm, depth))
     self.res_layer = nn.Sequential(
         get_norm(bn_norm, in_channel),
         nn.Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
         nn.PReLU(depth),
         nn.Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
         get_norm(bn_norm, depth),
         SELayer(depth, 16) if with_se else nn.Identity()
     )
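
The nn.MaxPool2d(1, stride) shortcut above can look odd: a 1x1 max-pool is an identity at each position, so with a stride it simply subsamples the input without adding parameters. A quick check:

import torch
import torch.nn as nn

x = torch.randn(1, 64, 14, 14)
print(nn.MaxPool2d(1, 2)(x).shape)   # torch.Size([1, 64, 7, 7]): strided identity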
Example 8
 def construct(self, w_in, w_out, bn_norm):
     # 3x3, BN, ReLU
     self.conv = nn.Conv2d(
         w_in, w_out, kernel_size=3, stride=1, padding=1, bias=False
     )
     self.bn = get_norm(bn_norm, w_out, 1)
     self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
Example 9
    def _make_layer(self, block, planes, blocks, stride=1, bn_norm='BN', num_splits=1, with_ibn=False):
        """ Stack n bottleneck modules where n is inferred from the depth of the network.
        Args:
            block: block type used to construct ResNext
            planes: number of output channels (need to multiply by block.expansion)
            blocks: number of blocks to be built
            stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
        Returns: a Module consisting of n sequential bottlenecks.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                get_norm(bn_norm, planes * block.expansion, num_splits),
            )

        layers = []
        if planes == 512:
            with_ibn = False
        layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                            self.baseWidth, self.cardinality, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, bn_norm, num_splits, with_ibn, self.baseWidth, self.cardinality, 1, None))

        return nn.Sequential(*layers)
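
The downsample guard above fires whenever a stage changes resolution or width. A hypothetical trace with a ResNet-50-style schedule (expansion 4, stage widths 64/128/256/512) shows the first block of every stage needs it:

expansion, inplanes = 4, 64                       # illustrative ResNet-50-style values
for planes, stride in [(64, 1), (128, 2), (256, 2), (512, 2)]:
    need_ds = stride != 1 or inplanes != planes * expansion
    print(planes, need_ds)                        # True each time: width jump or stride
    inplanes = planes * expansion                 # updated after the first block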
Example 10
 def __init__(
     self,
     in_planes: int,
     out_planes: int,
     kernel_size: int = 3,
     stride: int = 1,
     groups: int = 1,
     bn_norm=None,
     activation_layer: Optional[Callable[..., nn.Module]] = None,
     dilation: int = 1,
 ) -> None:
     padding = (kernel_size - 1) // 2 * dilation
     if activation_layer is None:
         activation_layer = nn.ReLU6
     super(ConvBNActivation, self).__init__(
         nn.Conv2d(in_planes,
                   out_planes,
                   kernel_size,
                   stride,
                   padding,
                   dilation=dilation,
                   groups=groups,
                   bias=False), get_norm(bn_norm, out_planes),
         activation_layer(inplace=True))
     self.out_channels = out_planes
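
The padding = (kernel_size - 1) // 2 * dilation formula above is the usual "same" padding for odd kernels: at stride 1 it preserves spatial size even when the convolution is dilated. A quick check:

import torch
import torch.nn as nn

k, d = 3, 2                                       # illustrative kernel size and dilation
pad = (k - 1) // 2 * d
conv = nn.Conv2d(8, 8, k, stride=1, padding=pad, dilation=d, bias=False)
print(conv(torch.randn(1, 8, 32, 32)).shape)      # torch.Size([1, 8, 32, 32])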
Example 11
 def construct(self, in_w, out_w, bn_norm):
     # 3x3, BN, ReLU
     self.conv = nn.Conv2d(
         in_w, out_w, kernel_size=3, stride=2, padding=1, bias=False
     )
     self.bn = get_norm(bn_norm, out_w)
     self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
Example 12
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    stride=1,
                    bn_norm="BN",
                    num_splits=1,
                    with_ibn=False,
                    with_se=False):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                get_norm(bn_norm, planes * block.expansion, num_splits),
            )

        layers = []
        if planes == 512:
            with_ibn = False
        layers.append(
            block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                  with_se, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                      with_se))

        return nn.Sequential(*layers)
Example 13
    def __init__(self, last_stride, bn_norm, num_splits, with_ibn, with_se,
                 with_nl, block, layers, non_layers):
        self.inplanes = 64
        super().__init__()
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = get_norm(bn_norm, 64, num_splits)
        self.relu = nn.ReLU(inplace=True)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm,
                                       num_splits, with_ibn, with_se)
        self.layer4 = self._make_layer(block,
                                       512,
                                       layers[3],
                                       last_stride,
                                       bn_norm,
                                       num_splits,
                                       with_se=with_se)

        self.random_init()

        if with_nl:
            self._build_nonlocal(layers, non_layers, bn_norm, num_splits)
        else:
            self.NL_1_idx = self.NL_2_idx = self.NL_3_idx = self.NL_4_idx = []
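
Note the stem above keeps a ceil_mode=True max-pool in place of the commented-out padded variant. The two agree on even input sizes but can differ by one pixel on odd ones, which changes every downstream feature-map size; an illustrative comparison:

import torch
import torch.nn as nn

x = torch.randn(1, 64, 57, 57)                           # odd spatial size
print(nn.MaxPool2d(3, 2, ceil_mode=True)(x).shape[-1])   # 28
print(nn.MaxPool2d(3, 2, padding=1)(x).shape[-1])        # 29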
Example 14
    def _make_layer(self,
                    block,
                    planes,
                    blocks,
                    bn_norm,
                    stride=1,
                    dilate=False):
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                get_norm(bn_norm, planes * block.expansion),
            )
        layers = []
        layers.append(
            block(self.inplanes, planes, bn_norm, stride, downsample,
                  self.groups, self.base_width, previous_dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes,
                      planes,
                      bn_norm,
                      groups=self.groups,
                      base_width=self.base_width,
                      dilation=self.dilation))

        return nn.Sequential(*layers)
Example 15
 def __init__(
         self,
         in_channels,
         out_channels,
         kernel_size,
         bn_norm,
         stride=1,
         padding=0,
         groups=1,
         IN=False
 ):
     super(ConvLayer, self).__init__()
     self.conv = nn.Conv2d(
         in_channels,
         out_channels,
         kernel_size,
         stride=stride,
         padding=padding,
         bias=False,
         groups=groups
     )
     if IN:
         self.bn = nn.InstanceNorm2d(out_channels, affine=True)
     else:
         self.bn = get_norm(bn_norm, out_channels)
     self.relu = nn.ReLU(inplace=True)
Example 16
    def __init__(self, last_stride, bn_norm, num_splits, with_ibn, block, layers, baseWidth=4, cardinality=32):
        """ Constructor
        Args:
            baseWidth: baseWidth for ResNeXt.
            cardinality: number of convolution groups.
            layers: config of layers, e.g., [3, 4, 6, 3]
        """
        super(ResNeXt, self).__init__()

        self.cardinality = cardinality
        self.baseWidth = baseWidth
        self.inplanes = 64
        self.output_size = 64

        self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False)
        self.bn1 = get_norm(bn_norm, 64, num_splits)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], 1, bn_norm, num_splits, with_ibn=with_ibn)
        self.layer2 = self._make_layer(block, 128, layers[1], 2, bn_norm, num_splits, with_ibn=with_ibn)
        self.layer3 = self._make_layer(block, 256, layers[2], 2, bn_norm, num_splits, with_ibn=with_ibn)
        self.layer4 = self._make_layer(block, 512, layers[3], last_stride, bn_norm, num_splits, with_ibn=with_ibn)

        self.random_init()
Example 17
    def __init__(self, cfg):
        super().__init__()
        self._cfg = cfg
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer(
            "pixel_std",
            torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))

        # fmt: off
        # backbone
        bn_norm = cfg.MODEL.BACKBONE.NORM
        num_splits = cfg.MODEL.BACKBONE.NORM_SPLIT
        with_se = cfg.MODEL.BACKBONE.WITH_SE
        # fmt: on

        backbone = build_backbone(cfg)
        self.backbone = nn.Sequential(backbone.conv1, backbone.bn1,
                                      backbone.relu, backbone.maxpool,
                                      backbone.layer1, backbone.layer2,
                                      backbone.layer3[0])
        res_conv4 = nn.Sequential(*backbone.layer3[1:])
        res_g_conv5 = backbone.layer4

        res_p_conv5 = nn.Sequential(
            Bottleneck(1024,
                       512,
                       bn_norm,
                       num_splits,
                       False,
                       with_se,
                       downsample=nn.Sequential(
                           nn.Conv2d(1024, 2048, 1, bias=False),
                           get_norm(bn_norm, 2048, num_splits))),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se),
            Bottleneck(2048, 512, bn_norm, num_splits, False, with_se))
        res_p_conv5.load_state_dict(backbone.layer4.state_dict())

        # branch1
        self.b1 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_g_conv5))
        self.b1_head = build_reid_heads(cfg)

        # branch2
        self.b2 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_p_conv5))
        self.b2_head = build_reid_heads(cfg)
        self.b21_head = build_reid_heads(cfg)
        self.b22_head = build_reid_heads(cfg)

        # branch3
        self.b3 = nn.Sequential(copy.deepcopy(res_conv4),
                                copy.deepcopy(res_p_conv5))
        self.b3_head = build_reid_heads(cfg)
        self.b31_head = build_reid_heads(cfg)
        self.b32_head = build_reid_heads(cfg)
        self.b33_head = build_reid_heads(cfg)
Example 18
 def construct(self, w_in, w_out, bn_norm):
     # 7x7, BN, ReLU, maxpool
     self.conv = nn.Conv2d(
         w_in, w_out, kernel_size=7, stride=2, padding=3, bias=False
     )
     self.bn = get_norm(bn_norm, w_out)
     self.relu = nn.ReLU(regnet_cfg.MEM.RELU_INPLACE)
     self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Example 19
 def _add_skip_proj(self, w_in, w_out, stride, bn_norm):
     self.proj = nn.Conv2d(w_in,
                           w_out,
                           kernel_size=1,
                           stride=stride,
                           padding=0,
                           bias=False)
     self.bn = get_norm(bn_norm, w_out)
Example 20
 def _build_pool_reduce(pool_layer, bn_norm, num_splits, input_dim=2048, reduce_dim=256):
     pool_reduce = nn.Sequential(
         pool_layer,
         nn.Conv2d(input_dim, reduce_dim, 1, bias=False),
         get_norm(bn_norm, reduce_dim, num_splits),
         nn.ReLU(True),
     )
     pool_reduce.apply(weights_init_kaiming)
     return pool_reduce
Example 21
 def __init__(self, in_channels, out_channels, bn_norm, stride=1):
     super(Conv1x1Linear, self).__init__()
     self.conv = nn.Conv2d(in_channels,
                           out_channels,
                           1,
                           stride=stride,
                           padding=0,
                           bias=False)
     self.bn = get_norm(bn_norm, out_channels)
Example 22
    def __init__(self,
                 cfg,
                 in_feat,
                 num_classes,
                 pool_layer=nn.AdaptiveAvgPool2d(1)):
        super().__init__()

        self.pool_layer = pool_layer

        self.occ_unit = OcclusionUnit(in_planes=in_feat)
        self.MaxPool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.MaxPool2 = nn.MaxPool2d(kernel_size=4, stride=2, padding=0)
        self.MaxPool3 = nn.MaxPool2d(kernel_size=6, stride=2, padding=0)
        self.MaxPool4 = nn.MaxPool2d(kernel_size=8, stride=2, padding=0)

        self.bnneck = get_norm(cfg.MODEL.HEADS.NORM,
                               in_feat,
                               cfg.MODEL.HEADS.NORM_SPLIT,
                               bias_freeze=True)
        self.bnneck.apply(weights_init_kaiming)
        self.bnneck_occ = get_norm(cfg.MODEL.HEADS.NORM,
                                   in_feat,
                                   cfg.MODEL.HEADS.NORM_SPLIT,
                                   bias_freeze=True)
        self.bnneck_occ.apply(weights_init_kaiming)

        # identity classification layer
        if cfg.MODEL.HEADS.CLS_LAYER == 'linear':
            self.classifier = nn.Linear(in_feat, num_classes, bias=False)
            self.classifier_occ = nn.Linear(in_feat, num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)
            self.classifier_occ.apply(weights_init_classifier)
        elif cfg.MODEL.HEADS.CLS_LAYER == 'arcface':
            self.classifier = Arcface(cfg, in_feat)
            self.classifier_occ = Arcface(cfg, in_feat)
        elif cfg.MODEL.HEADS.CLS_LAYER == 'circle':
            self.classifier = Circle(cfg, in_feat)
            self.classifier_occ = Circle(cfg, in_feat)
        else:
            self.classifier = nn.Linear(in_feat, num_classes, bias=False)
            self.classifier_occ = nn.Linear(in_feat, num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)
            self.classifier_occ.apply(weights_init_classifier)
Example 23
 def __init__(self, num_layers, bn_norm, drop_ratio, with_se):
     super(ResNetIR, self).__init__()
     assert num_layers in ["50x", "100x", "152x"], "num_layers should be 50x, 100x, or 152x"
     blocks = get_blocks(bn_norm, with_se, num_layers)
     self.input_layer = nn.Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      get_norm(bn_norm, 64),
                                      nn.PReLU(64))
     self.output_layer = nn.Sequential(get_norm(bn_norm, 512),
                                       nn.Dropout(drop_ratio))
     modules = []
     for block in blocks:
         for bottleneck in block:
             modules.append(
                 bottleneck_IR(bottleneck.in_channel,
                               bottleneck.depth,
                               bottleneck.bn_norm,
                               bottleneck.stride,
                               bottleneck.with_se))
     self.body = nn.Sequential(*modules)
Example 24
    def __init__(self, inp, oup, bn_norm, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = round(inp * expand_ratio)
        self.identity = stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride,
                          1,
                          groups=hidden_dim,
                          bias=False),
                get_norm(bn_norm, hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                get_norm(bn_norm, oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                get_norm(bn_norm, hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim,
                          hidden_dim,
                          3,
                          stride,
                          1,
                          groups=hidden_dim,
                          bias=False),
                get_norm(bn_norm, hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                get_norm(bn_norm, oup),
            )
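
The expansion bookkeeping above, with illustrative values (MobileNetV2-style, expand_ratio=6):

inp, oup, stride, expand_ratio = 32, 32, 1, 6
hidden_dim = round(inp * expand_ratio)        # 192 depthwise channels
identity = stride == 1 and inp == oup         # residual add is possible here
print(hidden_dim, identity)                   # 192 True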
Example 25
 def __init__(self, in_channels, out_channels, bn_norm, stride=1, groups=1):
     super(Conv1x1, self).__init__()
     self.conv = nn.Conv2d(in_channels,
                           out_channels,
                           1,
                           stride=stride,
                           padding=0,
                           bias=False,
                           groups=groups)
     self.bn = get_norm(bn_norm, out_channels)
     self.relu = nn.ReLU(inplace=True)
Example 26
    def _make_layer(self, block, planes, blocks, stride=1, bn_norm="BN", num_splits=1, with_ibn=False,
                    dilation=1, dropblock_prob=0.0, is_first=True):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                if dilation == 1:
                    down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                    ceil_mode=True, count_include_pad=False))
                else:
                    down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                    ceil_mode=True, count_include_pad=False))
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=1, bias=False))
            else:
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=stride, bias=False))
            down_layers.append(get_norm(bn_norm, planes * block.expansion, num_splits))
            downsample = nn.Sequential(*down_layers)

        layers = []
        if dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, bn_norm, num_splits, with_ibn,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=dilation, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))

        return nn.Sequential(*layers)
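
When avg_down is set, the shortcut above swaps the usual strided 1x1 conv for a stride-s average pool followed by a stride-1 1x1 conv (the ResNet-D downsampling trick). Both produce the same output shape, as a quick check shows:

import torch
import torch.nn as nn

x = torch.randn(1, 256, 28, 28)
avg_down = nn.Sequential(
    nn.AvgPool2d(2, 2, ceil_mode=True, count_include_pad=False),
    nn.Conv2d(256, 512, 1, stride=1, bias=False))
strided = nn.Conv2d(256, 512, 1, stride=2, bias=False)
print(avg_down(x).shape, strided(x).shape)   # both torch.Size([1, 512, 14, 14])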
Example 27
 def construct(self, w_in, w_out, stride, bn_norm, bm, gw, se_r):
     # Compute the bottleneck width
     w_b = int(round(w_out * bm))
     # Compute the number of groups
     num_gs = w_b // gw
     # 1x1, BN, ReLU
     self.a = nn.Conv2d(w_in, w_b, kernel_size=1, stride=1, padding=0, bias=False)
     self.a_bn = get_norm(bn_norm, w_b)
     self.a_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
     # 3x3, BN, ReLU
     self.b = nn.Conv2d(
         w_b, w_b, kernel_size=3, stride=stride, padding=1, groups=num_gs, bias=False
     )
     self.b_bn = get_norm(bn_norm, w_b)
     self.b_relu = nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE)
     # Squeeze-and-Excitation (SE)
     if se_r:
         w_se = int(round(w_in * se_r))
         self.se = SE(w_b, w_se)
     # 1x1, BN
     self.c = nn.Conv2d(w_b, w_out, kernel_size=1, stride=1, padding=0, bias=False)
     self.c_bn = get_norm(bn_norm, w_out)
     self.c_bn.final_bn = True
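
The bottleneck width and group computation above, with illustrative RegNet-style values:

w_out, bm, gw = 224, 1.0, 56      # illustrative: output width, bottleneck multiplier, group width
w_b = int(round(w_out * bm))      # bottleneck width: 224
num_gs = w_b // gw                # groups for the 3x3 conv: 4
print(w_b, num_gs)                # 224 4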
Example 28
 def __init__(self,
              inplanes,
              planes,
              bn_norm,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1):
     super().__init__()
     if groups != 1 or base_width != 64:
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if dilation > 1:
         raise NotImplementedError(
             "Dilation > 1 not supported in BasicBlock")
     self.bn1 = get_norm(bn_norm, inplanes)
     self.conv1 = conv3x3(inplanes, planes)
     self.bn2 = get_norm(bn_norm, planes)
     self.prelu = nn.PReLU(planes)
     self.conv2 = conv3x3(planes, planes, stride)
     self.bn3 = get_norm(bn_norm, planes)
     self.downsample = downsample
     self.stride = stride
Example 29
 def __init__(self, in_channels, out_channels, bn_norm):
     super(LightConv3x3, self).__init__()
     self.conv1 = nn.Conv2d(
         in_channels, out_channels, 1, stride=1, padding=0, bias=False
     )
     self.conv2 = nn.Conv2d(
         out_channels,
         out_channels,
         3,
         stride=1,
         padding=1,
         bias=False,
         groups=out_channels
     )
     self.bn = get_norm(bn_norm, out_channels)
     self.relu = nn.ReLU(inplace=True)
Example 30
    def __init__(self, cfg):
        super().__init__()
        self._cfg = cfg
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        self.register_buffer(
            "pixel_mean",
            torch.tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
        self.register_buffer(
            "pixel_std",
            torch.tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))

        # backbone
        self.backbone = build_backbone(cfg)

        # head
        self.heads = build_heads(cfg)

        self.has_extra_bn = cfg.MODEL.BACKBONE.EXTRA_BN
        if self.has_extra_bn:
            self.heads_extra_bn = get_norm(cfg.MODEL.BACKBONE.NORM,
                                           cfg.MODEL.BACKBONE.FEAT_DIM)