    def _init_layers(self):
        self.levels = len(self.anchor_strides)
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.cls_bn_levels = nn.ModuleList()
        self.reg_bn_levels = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                SeparableConv(chn, self.feat_channels, bias=True, relu=False))
            cls_bn_level = nn.ModuleList()
            for _ in range(self.levels):
                _, bn_layer = build_norm_layer(self.norm_cfg,
                                               self.feat_channels)
                cls_bn_level.append(nn.Sequential(bn_layer, Swish()))
            self.cls_bn_levels.append(cls_bn_level)
            self.reg_convs.append(
                SeparableConv(chn, self.feat_channels, bias=True, relu=False))
            reg_bn_level = nn.ModuleList()
            for _ in range(self.levels):
                _, bn_layer = build_norm_layer(self.norm_cfg,
                                               self.feat_channels)
                reg_bn_level.append(nn.Sequential(bn_layer, Swish()))
            self.reg_bn_levels.append(reg_bn_level)
        self.retina_cls = SeparableConv(self.feat_channels,
                                        self.num_anchors *
                                        self.cls_out_channels,
                                        bias=True,
                                        relu=False)
        self.retina_reg = SeparableConv(self.feat_channels,
                                        self.num_anchors * 4,
                                        bias=True,
                                        relu=False)
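
Note: the head above shares the SeparableConv weights across pyramid levels but keeps a separate BN + Swish per level, EfficientDet-style. A minimal sketch of how the classification branch would consume it (the method name and feats argument are illustrative, not from the snippet):

    def forward_cls_branch(self, feats):
        # feats: one feature map per pyramid level (len == self.levels)
        feats = list(feats)
        for i in range(self.stacked_convs):
            for lvl in range(self.levels):
                # shared conv weights, level-specific BN + Swish
                feats[lvl] = self.cls_bn_levels[i][lvl](
                    self.cls_convs[i](feats[lvl]))
        return [self.retina_cls(f) for f in feats]
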
Example #2
    def _make_stem_layer(self, in_channels):
        self.conv1 = nn.Sequential(
            build_conv_layer(self.conv_cfg,
                             in_channels,
                             32,
                             kernel_size=3,
                             stride=2,
                             padding=1,
                             bias=False),
            build_norm_layer(self.norm_cfg, 32)[1],
            nn.ReLU(inplace=True),
            build_conv_layer(self.conv_cfg,
                             32,
                             32,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             bias=False),
            build_norm_layer(self.norm_cfg, 32)[1],
            nn.ReLU(inplace=True),
            build_conv_layer(self.conv_cfg,
                             32,
                             64,
                             kernel_size=3,
                             stride=1,
                             padding=1,
                             bias=False),
        )
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Example #3
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels[j],
                                             in_channels[i],
                                             kernel_size=1,
                                             stride=1,
                                             padding=0,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[i],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[j],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
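
In HRNet's forward pass these fuse layers are applied by summing every input branch into each output branch; a condensed sketch, assuming x is the list of branch feature maps and a trailing self.relu as in the reference module (fuse_layer[i] is None, so the same-resolution branch passes through unchanged):

    def fuse(self, x):
        out = []
        for i, fuse_layer in enumerate(self.fuse_layers):
            y = x[i]
            for j, op in enumerate(fuse_layer):
                if j != i:
                    # op upsamples (j > i) or strided-downsamples (j < i) x[j]
                    y = y + op(x[j])
            out.append(self.relu(y))
        return out
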
Example #4
File: hrnet.py Project: zyg11/TSD
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False,
                            ),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True),
                        ))
                else:
                    transition_layers.append(None)
            else:
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = (num_channels_cur_layer[i] if j == i -
                                    num_branches_pre else in_channels)
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False,
                            ),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True),
                        ))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)
Example #5
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, 'Not implemented yet.'
        assert gen_attention is None, 'Not implemented yet.'
        assert gcb is None, 'Not implemented yet.'

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        # norm types with a fused in-place activation (e.g. in-place ABN)
        # make a separate ReLU unnecessary
        self.activation = None
        if 'inplace' not in norm_cfg['type']:
            self.activation = nn.ReLU(inplace=True)
        else:
            norm2.activation = 'identity'

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      planes,
                                      3,
                                      stride=stride,
                                      padding=dilation,
                                      dilation=dilation,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_ins,
                 conv_cfg=None,
                 norm_cfg=None,
                 separable_conv=True,
                 act_cfg=None,
                 eps=0.0001):
        super(WeightedInputConv_V2, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.num_ins = num_ins
        self.eps = eps
        if separable_conv:
            _, bn_layer = build_norm_layer(norm_cfg, out_channels)
            self.conv_op = nn.Sequential(
                SeparableConv(in_channels, out_channels, bias=True,
                              relu=False), bn_layer)
        else:
            self.conv_op = ConvModule(in_channels,
                                      out_channels,
                                      3,
                                      padding=1,
                                      conv_cfg=None,
                                      norm_cfg=norm_cfg,
                                      act_cfg=None,
                                      inplace=False)

        # edge weight and swish
        self.weight = nn.Parameter(torch.Tensor(self.num_ins).fill_(1.0))
        self._swish = Swish()
Example #7
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 inplanes,
                                 planes * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(inplanes,
                      planes,
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
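
Typical call sites then chain the returned stages, e.g. (block type and counts here follow ResNet-18 and are only illustrative):

    self.layer1 = self._make_layer(BasicBlock, 64, 64, blocks=2)
    self.layer2 = self._make_layer(BasicBlock, 64, 128, blocks=2, stride=2)
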
Example #8
    def __init__(
            self,
            inplanes,
            planes,
            stride=1,
            dilation=1,
            downsample=None,
            style="pytorch",
            with_cp=False,
            conv_cfg=None,
            norm_cfg=dict(type="BN"),
            dcn=None,
            gcb=None,
            gen_attention=None,
    ):
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False,
        )
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
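
For context, the forward pass matching this constructor is the standard two-conv residual; a sketch assuming the usual norm1/norm2 properties that resolve self.norm1_name and self.norm2_name:

    def forward(self, x):
        identity = x
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.norm2(self.conv2(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        return self.relu(out + identity)
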
Example #9
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[]):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb,
              gen_attention=gen_attention if
              (0 in gen_attention_blocks) else None))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=dilation,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb,
                  gen_attention=gen_attention if
                  (i in gen_attention_blocks) else None))

    return nn.Sequential(*layers)
Example #10
    def _make_stem_layer(self, in_channels):
        conv1 = build_conv_layer(self.conv_cfg,
                                 in_channels,
                                 64,
                                 kernel_size=7,
                                 stride=2,
                                 padding=3,
                                 bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        relu = nn.ReLU(inplace=True)
        maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer0 = nn.Sequential(conv1, norm1, relu, maxpool)
Example #11
    def __init__(self, block_args, global_params, norm_cfg):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Expansion phase
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = build_norm_layer(norm_cfg, num_features=oup, postfix=0)[1]

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = build_norm_layer(norm_cfg, num_features=oup, postfix=1)[1]

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Output phase
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = build_norm_layer(norm_cfg, num_features=final_oup, postfix=2)[1]
        self._swish = MemoryEfficientSwish()
Example #12
    def _make_stem_layer(self, in_channels):
        self.conv1 = build_conv_layer(self.conv_cfg,
                                      in_channels,
                                      64,
                                      kernel_size=7,
                                      stride=2,
                                      padding=3,
                                      bias=False)
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)

        self.activation = None
        if 'InPlace' not in self.norm_cfg['type']:
            self.activation = nn.ReLU(inplace=True)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Example #13
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1,
                         dcn=None,
                         gcb=None):
        downsample = None
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 self.in_channels[branch_index],
                                 num_channels[branch_index] * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(self.in_channels[branch_index],
                  num_channels[branch_index],
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg,
                  dcn=dcn,
                  gcb=gcb))
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.in_channels[branch_index],
                      num_channels[branch_index],
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg,
                      dcn=dcn,
                      gcb=gcb))

        return nn.Sequential(*layers)
Example #14
    def __init__(self,
                 num_repeat,
                 connect_norm_eval,
                 use_act=True,
                 backbone_type='ResNet',
                 **backbone_args):
        super(CBNet, self).__init__()

        assert backbone_type in self.arch_cfg
        self.backbone_type = backbone_type
        self.num_repeat = num_repeat
        self.connect_norm_eval = connect_norm_eval
        self.backbone_names = []
        self.connect_op_names = []
        self.use_act = use_act
        assert num_repeat >= 2
        for i in range(1, num_repeat + 1):
            backbone = self.arch_cfg[backbone_type](**backbone_args)
            backbone_name = 'cb{}'.format(i)
            self.add_module(backbone_name, backbone)
            self.backbone_names.append(backbone_name)

        left_out_channels = [256, 512, 1024, 2048]
        right_in_channels = [64, 256, 512, 1024]
        for i, _ in enumerate(left_out_channels):
            conv = build_conv_layer(backbone.conv_cfg,
                                    left_out_channels[i],
                                    right_in_channels[i],
                                    kernel_size=1,
                                    padding=0,
                                    stride=1,
                                    bias=False)
            # constant_init(conv, 0)
            _, norm = build_norm_layer(backbone.norm_cfg, right_in_channels[i])
            # BatchNorm defaults: running_mean=0, running_var=1, weight=1,
            # bias=0, so initially ((x - 0) / 1) * weight + bias = x.
            # In eval mode, running_mean and running_var are not updated;
            # if requires_grad, weight and bias still are.
            connect_op = nn.Sequential(conv, norm)
            connect_op_name = 'connect_op{}'.format(i + 1)
            self.add_module(connect_op_name, connect_op)
            self.connect_op_names.append(connect_op_name)
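
The connect ops are meant to project each stage output of backbone i into the stage input width of backbone i + 1. A rough composite-forward sketch under that assumption (the per-backbone signature taking previous features is hypothetical):

    def forward(self, x):
        prev_feats = None
        for name in self.backbone_names:
            backbone = getattr(self, name)
            if prev_feats is not None:
                # 1x1 conv + BN to match the next backbone's stage widths
                prev_feats = [getattr(self, op)(f) for op, f
                              in zip(self.connect_op_names, prev_feats)]
            prev_feats = backbone(x, prev_feats)  # hypothetical signature
        return prev_feats
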
Example #15
    def _make_dynamic_gate_layers(self, num_backbones=1):
        if self.num_branches < 4:
            return None

        num_branches = self.num_branches
        gate_num = num_branches
        in_channels = self.in_channels
        gate_layers_backone = []
        for num_backbone in range(num_backbones):
            gate_layers = []
            for i in range(num_branches):
                # if i==0 or i==num_branches-1:   # only keep and down
                #     gate_num=2
                # else:
                #     gate_num=3
                gate_layers.append(
                    nn.Sequential(
                        build_conv_layer(
                            self.conv_cfg,
                            in_channels[i],
                            in_channels[i] // 2,  # //2
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            bias=False),
                        build_norm_layer(self.norm_cfg,
                                         in_channels[i] // 2)[1],  # //2
                        nn.ReLU(inplace=False),
                        nn.AdaptiveAvgPool2d((1, 1)),
                        build_conv_layer(
                            self.conv_cfg,
                            in_channels[i] // 2,  # //2
                            gate_num,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            bias=True)))
            gate_layers_backone.append(nn.ModuleList(gate_layers))
        return nn.ModuleList(gate_layers_backone)
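
Each gate squeezes a branch down to gate_num logits of shape (N, gate_num, 1, 1); turning them into per-path mixing weights could be as simple as a softmax (a sketch; the softmax choice and the helper below are assumptions, not from the snippet):

    import torch.nn.functional as F

    def gate_weights(gate_layers, x, i, backbone_idx=0):
        # x: list of branch feature maps; returns (N, gate_num, 1, 1) weights
        logits = gate_layers[backbone_idx][i](x[i])
        return F.softmax(logits, dim=1)  # one weight per fusion path
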
Example #16
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):  # i indexes the fused output branch, j the input branch
            fuse_layer = []
            for j in range(num_branches):
                if j > i:  # 1x1 conv to reduce channels, then upsample to raise resolution
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels[j],
                                             in_channels[i],
                                             kernel_size=1,
                                             stride=1,
                                             padding=0,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:  # same branch: used as-is
                    fuse_layer.append(None)
                else:  # j<i
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(  # 3x3 conv+stride=2
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        # downsample without changing the
                                        # channel width -> some info is lost
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
Example #17
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        """Bottleneck block for ResNet.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(norm_cfg,
                                                  planes * self.expansion,
                                                  postfix=3)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      planes,
                                      kernel_size=1,
                                      stride=self.conv1_stride,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(conv_cfg,
                                          planes,
                                          planes,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=dilation,
                                          dilation=dilation,
                                          bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(dcn,
                                          planes,
                                          planes,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=dilation,
                                          dilation=dilation,
                                          bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes * self.expansion,
                                      kernel_size=1,
                                      bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)
Example #18
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        # super(BasicBlockPlugins, self).__init__()
        super().__init__()
        assert not with_cp
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        # self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv1 = build_conv_layer(conv_cfg,
                                          inplanes,
                                          planes,
                                          3,
                                          stride=stride,
                                          padding=dilation,
                                          dilation=dilation,
                                          bias=False)
        else:
            assert conv_cfg is None
            self.conv1 = build_conv_layer(dcn,
                                          inplanes,
                                          planes,
                                          kernel_size=3,
                                          stride=stride,
                                          padding=dilation,
                                          dilation=dilation,
                                          bias=False)
        self.add_module(self.norm1_name, norm1)

        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        if self.with_gcb:
            self.context_block = ContextBlock(inplanes=planes, **gcb)
Example #19
    def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups

        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                                  width,
                                                  postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg,
                                                  width,
                                                  postfix=2)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg,
                                                  self.planes * self.expansion,
                                                  postfix=3)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      self.inplanes,
                                      width,
                                      kernel_size=1,
                                      stride=self.conv1_stride,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(self.conv_cfg,
                                          width,
                                          width,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation,
                                          groups=groups,
                                          bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn,
                                          width,
                                          width,
                                          kernel_size=3,
                                          stride=self.conv2_stride,
                                          padding=self.dilation,
                                          dilation=self.dilation,
                                          groups=groups,
                                          bias=False)

        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg,
                                      width,
                                      self.planes * self.expansion,
                                      kernel_size=1,
                                      bias=False)
        self.add_module(self.norm3_name, norm3)
Example #20
    def __init__(self,
                 arch='efficientnet-b0',
                 out_levels=[3, 4, 5],
                 norm_cfg=dict(type='SyncBN',
                               momentum=0.01,
                               eps=1e-3,
                               requires_grad=False),
                 norm_eval=True,
                 override_params=None):
        super(EfficientNet, self).__init__()
        self._check_model_name_is_valid(arch)
        blocks_args, global_params = get_model_params(arch, override_params)
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'blocks_args must not be empty'
        self._global_params = global_params
        self._blocks_args = blocks_args
        self.norm_eval = norm_eval
        self.out_levels = out_levels
        self.norm_cfg = norm_cfg

        # Get stem static or dynamic convolution depending on image size
        image_size = global_params.image_size
        Conv2d = get_same_padding_conv2d(image_size=image_size)

        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(
            32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels,
                                 out_channels,
                                 kernel_size=3,
                                 stride=2,
                                 bias=False)
        # self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        _, self._bn0 = build_norm_layer(norm_cfg, out_channels)
        image_size = calculate_output_image_size(image_size, 2)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters,
                                            self._global_params),
                output_filters=round_filters(block_args.output_filters,
                                             self._global_params),
                num_repeat=round_repeats(block_args.num_repeat,
                                         self._global_params))

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(
                MBConvBlock(block_args,
                            self._global_params,
                            image_size=image_size,
                            norm_cfg=self.norm_cfg))
            image_size = calculate_output_image_size(image_size,
                                                     block_args.stride)
            if block_args.num_repeat > 1:  # modify block_args to keep same output size
                block_args = block_args._replace(
                    input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(
                    MBConvBlock(block_args,
                                self._global_params,
                                image_size=image_size,
                                norm_cfg=self.norm_cfg))
            # last_stage_idx

        self._swish = MemoryEfficientSwish()
Example #21
    def __init__(self,
                 block_args,
                 global_params,
                 image_size=None,
                 norm_cfg=dict(type="BN",
                               momentum=0.01,
                               eps=1e-3,
                               requires_grad=False)):
        super().__init__()
        self._block_args = block_args
        # print(block_args)
        self._bn_mom = 1 - global_params.batch_norm_momentum  # pytorch's difference from tensorflow
        self._bn_eps = global_params.batch_norm_epsilon
        self.has_se = (self._block_args.se_ratio
                       is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip  # whether to use skip connection and drop connect
        self.norm_cfg = norm_cfg

        # Expansion phase (Inverted Bottleneck)
        inp = self._block_args.input_filters  # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio  # number of output channels
        if self._block_args.expand_ratio != 1:
            Conv2d = get_same_padding_conv2d(image_size=image_size)
            self._expand_conv = Conv2d(in_channels=inp,
                                       out_channels=oup,
                                       kernel_size=1,
                                       bias=False)
            # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
            _, self._bn0 = build_norm_layer(norm_cfg, oup)

            # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._depthwise_conv = Conv2d(
            in_channels=oup,
            out_channels=oup,
            groups=oup,  # groups makes it depthwise
            kernel_size=k,
            stride=s,
            bias=False)
        # self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        _, self._bn1 = build_norm_layer(norm_cfg, oup)
        image_size = calculate_output_image_size(image_size, s)

        # Squeeze and Excitation layer, if desired
        if self.has_se:
            Conv2d = get_same_padding_conv2d(image_size=(1, 1))
            num_squeezed_channels = max(
                1,
                int(self._block_args.input_filters *
                    self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup,
                                     out_channels=num_squeezed_channels,
                                     kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels,
                                     out_channels=oup,
                                     kernel_size=1)

        # Pointwise convolution phase
        final_oup = self._block_args.output_filters
        Conv2d = get_same_padding_conv2d(image_size=image_size)
        self._project_conv = Conv2d(in_channels=oup,
                                    out_channels=final_oup,
                                    kernel_size=1,
                                    bias=False)
        # self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        _, self._bn2 = build_norm_layer(norm_cfg, final_oup)
        self._swish = MemoryEfficientSwish()
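
The matching forward pass mirrors the reference EfficientNet implementation: expand, depthwise conv, squeeze-and-excitation, project, then an identity skip when shapes allow. A condensed sketch, assuming torch and torch.nn.functional as F are imported (drop-connect omitted):

    def forward(self, inputs):
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._swish(self._bn0(self._expand_conv(x)))
        x = self._swish(self._bn1(self._depthwise_conv(x)))
        if self.has_se:
            se = F.adaptive_avg_pool2d(x, 1)
            se = self._se_expand(self._swish(self._se_reduce(se)))
            x = torch.sigmoid(se) * x
        x = self._bn2(self._project_conv(x))
        if (self.id_skip and self._block_args.stride == 1 and
                self._block_args.input_filters ==
                self._block_args.output_filters):
            x = x + inputs  # skip connection
        return x
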
Example #22
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False,
                 with_dcn=None,
                 with_gcb=None,
                 dcn=None,
                 gcb=None):
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      in_channels,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(self.conv_cfg,
                                      64,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        dcn_branches_repeats, gcb_branches_repeats = None, None
        if with_dcn is not None:
            assert len(with_dcn) == self.stage4_cfg['num_modules']
            dcn_branches_repeats = [
                [] for _ in range(self.stage4_cfg['num_modules'])
            ]
            for repeatid, dcn_branches in enumerate(with_dcn):
                assert len(dcn_branches) == self.stage4_cfg['num_branches']
                for use_dcn in dcn_branches:
                    if use_dcn is False:
                        dcn_branches_repeats[repeatid].append(None)
                    else:
                        assert isinstance(dcn, dict)
                        dcn_branches_repeats[repeatid].append(dcn)
        if with_gcb is not None:
            assert len(with_gcb) == self.stage4_cfg['num_modules']
            gcb_branches_repeats = [
                [] for _ in range(self.stage4_cfg['num_modules'])
            ]
            for repeatid, gcb_branches in enumerate(with_gcb):
                assert len(gcb_branches) == self.stage4_cfg['num_branches']
                for use_gcb in gcb_branches:
                    if use_gcb is False:
                        gcb_branches_repeats[repeatid].append(None)
                    else:
                        assert isinstance(gcb, dict)
                        gcb_branches_repeats[repeatid].append(gcb)

        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg,
            num_channels,
            dcn_branches_repeats=dcn_branches_repeats,
            gcb_branches_repeats=gcb_branches_repeats)  # NOTE
Example #23
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[],
                   scale=4,
                   baseWidth=26):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            nn.AvgPool2d(kernel_size=stride,
                         stride=stride,
                         ceil_mode=True,
                         count_include_pad=False),
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=1,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb,
              gen_attention=gen_attention if
              (0 in gen_attention_blocks) else None,
              scale=scale,
              baseWidth=baseWidth,
              stype='stage'))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=dilation,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb,
                  gen_attention=gen_attention if
                  (i in gen_attention_blocks) else None,
                  scale=scale,
                  baseWidth=baseWidth))

    return nn.Sequential(*layers)
Example #24
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None,
                 scale=4,
                 baseWidth=26,
                 stype='normal'):
        """Bottle2neck block for Res2Net.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottle2neck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        width = int(math.floor(planes * (baseWidth / 64.0)))
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg,
                                                  width * scale,
                                                  postfix=1)
        self.norm3_name, norm3 = build_norm_layer(norm_cfg,
                                                  planes * self.expansion,
                                                  postfix=3)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      width * scale,
                                      kernel_size=1,
                                      stride=self.conv1_stride,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)

        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        if stype == 'stage':
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)
        convs = []
        bns = []

        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            for i in range(self.nums):
                convs.append(
                    build_conv_layer(conv_cfg,
                                     width,
                                     width,
                                     kernel_size=3,
                                     stride=self.conv2_stride,
                                     padding=dilation,
                                     dilation=dilation,
                                     bias=False))
                bns.append(build_norm_layer(norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            for i in range(self.nums):
                convs.append(
                    build_conv_layer(dcn,
                                     width,
                                     width,
                                     kernel_size=3,
                                     stride=self.conv2_stride,
                                     padding=dilation,
                                     dilation=dilation,
                                     bias=False))
                bns.append(build_norm_layer(norm_cfg, width, postfix=i + 1)[1])
            self.convs = nn.ModuleList(convs)
            self.bns = nn.ModuleList(bns)

        self.conv3 = build_conv_layer(conv_cfg,
                                      width * scale,
                                      planes * self.expansion,
                                      kernel_size=1,
                                      bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)
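
    # A hedged, self-contained sketch (the forward() of this example is not
    # shown here) of the Res2Net flow the modules above implement: conv1
    # widens the input to width*scale channels, the result is split into
    # `scale` groups, each 3x3 branch sees its group plus the previous
    # branch's output, and the last group bypasses the branches (pooled when
    # stype == 'stage'). Assumes `import torch` at the top of the file, as
    # torch.nn is already in use.
    def _res2net_flow_sketch(self, x):
        out = self.relu(getattr(self, self.norm1_name)(self.conv1(x)))
        spx = torch.split(out, self.width, dim=1)  # `scale` chunks
        for i in range(self.nums):
            sp = spx[i] if i == 0 or self.stype == 'stage' else sp + spx[i]
            sp = self.relu(self.bns[i](self.convs[i](sp)))
            out = sp if i == 0 else torch.cat((out, sp), dim=1)
        if self.scale != 1:
            tail = spx[self.nums]
            if self.stype == 'stage':
                tail = self.pool(tail)
            out = torch.cat((out, tail), dim=1)
        return getattr(self, self.norm3_name)(self.conv3(out))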
Example #25
    def __init__(self,
                 depth,
                 in_channels=3,
                 num_stages=4,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 gcb=None,
                 stage_with_gcb=(False, False, False, False),
                 gen_attention=None,
                 stage_with_gen_attention=((), (), (), ()),
                 with_cp=False,
                 zero_init_residual=True):
        super(TB_ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.gen_attention = gen_attention
        self.gcb = gcb
        self.stage_with_gcb = stage_with_gcb
        if gcb is not None:
            assert len(stage_with_gcb) == num_stages
        self.zero_init_residual = zero_init_residual
        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64

        self._make_stem_layer(in_channels)

        self.res_layers = []

        # dual backbone (DB): a second set of res layers, plus 1x1 convs that
        # reduce the channel count of each first-backbone stage output
        # (256->64, 512->256, 1024->512, 2048->1024)
        self.second_res_layers = []
        self.eb_second_conv1 = nn.Conv2d(256, 64, kernel_size=1, bias=False)
        self.eb_second_conv2 = nn.Conv2d(512, 256, kernel_size=1, bias=False)
        self.eb_second_conv3 = nn.Conv2d(1024, 512, kernel_size=1, bias=False)
        self.eb_second_conv4 = nn.Conv2d(2048, 1024, kernel_size=1, bias=False)

        # triple backbone (TB): a third set of res layers with its own
        # 1x1 channel-reduction convs, mirroring the DB modules above
        self.third_res_layers = []
        self.eb_third_conv1 = nn.Conv2d(256, 64, kernel_size=1, bias=False)
        self.eb_third_conv2 = nn.Conv2d(512, 256, kernel_size=1, bias=False)
        self.eb_third_conv3 = nn.Conv2d(1024, 512, kernel_size=1, bias=False)
        self.eb_third_conv4 = nn.Conv2d(2048, 1024, kernel_size=1, bias=False)

        # BN layers paired with the DB lateral convs
        normalize = self.norm_cfg

        self.eb_second_bn1_name, eb_second_bn1 = build_norm_layer(normalize, 64, postfix='eb_second_bn1')
        self.add_module(self.eb_second_bn1_name, eb_second_bn1)

        self.eb_second_bn2_name, eb_second_bn2 = build_norm_layer(normalize, 256, postfix='eb_second_bn2')
        self.add_module(self.eb_second_bn2_name, eb_second_bn2)

        self.eb_second_bn3_name, eb_second_bn3 = build_norm_layer(normalize, 512, postfix='eb_second_bn3')
        self.add_module(self.eb_second_bn3_name, eb_second_bn3)

        self.eb_second_bn4_name, eb_second_bn4 = build_norm_layer(normalize, 1024, postfix='eb_second_bn4')
        self.add_module(self.eb_second_bn4_name, eb_second_bn4)

        # BN layers paired with the TB lateral convs

        self.eb_third_bn1_name, eb_third_bn1 = build_norm_layer(normalize, 64, postfix='eb_third_bn1')
        self.add_module(self.eb_third_bn1_name, eb_third_bn1)

        self.eb_third_bn2_name, eb_third_bn2 = build_norm_layer(normalize, 256, postfix='eb_third_bn2')
        self.add_module(self.eb_third_bn2_name, eb_third_bn2)

        self.eb_third_bn3_name, eb_third_bn3 = build_norm_layer(normalize, 512, postfix='eb_third_bn3')
        self.add_module(self.eb_third_bn3_name, eb_third_bn3)

        self.eb_third_bn4_name, eb_third_bn4 = build_norm_layer(normalize, 1024, postfix='eb_third_bn4')
        self.add_module(self.eb_third_bn4_name, eb_third_bn4)

        # 2x nearest-neighbor upsampling used by the DB/TB lateral paths
        self.eb_upsample = nn.Upsample(scale_factor=2, mode='nearest')

        for i, num_blocks in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            dcn = self.dcn if self.stage_with_dcn[i] else None
            gcb = self.gcb if self.stage_with_gcb[i] else None
            planes = 64 * 2**i
            res_layer = tb_make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention,
                gen_attention_blocks=stage_with_gen_attention[i])

            second_res_layer = tb_make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention,
                gen_attention_blocks=stage_with_gen_attention[i])

            third_res_layer = tb_make_res_layer(
                self.block,
                self.inplanes,
                planes,
                num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention,
                gen_attention_blocks=stage_with_gen_attention[i])

            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

            # register the dual-backbone stage
            second_layer_name = 'second_layer{}'.format(i + 1)
            self.add_module(second_layer_name, second_res_layer)
            self.second_res_layers.append(second_layer_name)

            # register the triple-backbone stage
            third_layer_name = 'third_layer{}'.format(i + 1)
            self.add_module(third_layer_name, third_res_layer)
            self.third_res_layers.append(third_layer_name)

        self._freeze_stages()

        self.feat_dim = self.block.expansion * 64 * 2**(
            len(self.stage_blocks) - 1)
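
    # A hedged sketch (forward() is not shown in this excerpt) of how the DB
    # lateral modules are plausibly used, in the spirit of CBNet-style
    # composite backbones: a stage output from the previous pass is reduced
    # by its 1x1 conv, normalized, and upsampled 2x before being merged into
    # the next pass. The conv/BN pairing is inferred from channel counts and
    # is not confirmed by this excerpt.
    def _db_lateral_sketch(self, prev_stage_out):
        y = self.eb_second_conv2(prev_stage_out)  # e.g. 512 -> 256 channels
        y = getattr(self, self.eb_second_bn2_name)(y)
        return self.eb_upsample(y)  # restore the 2x spatial resolution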
Example #26
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False):
        super(HRNet_upsamp, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(self.conv_cfg,
                                      in_channels,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(self.conv_cfg,
                                      64,
                                      64,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        #self.do_upsample = nn.Upsample(scale_factor=2,mode='bilinear')
        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # deconv layers (HigherHRNet-style config keys), built from the
        # stage-3 branch channels
        deconv_cfg = {}
        deconv_cfg["NUM_CHANNELS"] = [32, 64, 128, 256]
        deconv_cfg["NUM_DECONVS"] = 1
        deconv_cfg["KERNEL_SIZE"] = [3, 3, 3, 3]
        deconv_cfg["NUM_BASIC_BLOCKS"] = 1
        self.num_deconvs = len(pre_stage_channels)  # one deconv per branch, in place of deconv_cfg["NUM_DECONVS"]
        self.deconv_layers = self._make_deconv_layers(deconv_cfg,
                                                      pre_stage_channels)

        #self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)

        #deconv_cfg["NUM_BASIC_BLOCKS"] = 1
        self.num_deconvs = len(pre_stage_channels)  # one deconv per branch, in place of deconv_cfg["NUM_DECONVS"]
        self.deconv_layers_2 = self._make_deconv_layers(
            deconv_cfg, pre_stage_channels)
Example #27
    def __init__(self,
                 arch='efficientnet-b0',
                 out_indices=[4, 5, 6, 7, 8],
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 override_params=None):
        super(EfficientNet, self).__init__()
        self._check_model_name_is_valid(arch)
        blocks_args, global_params = get_model_params(arch, override_params)
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'blocks_args must be non-empty'
        self._global_params = global_params
        self._blocks_args = blocks_args
        self.out_indices = out_indices
        self.norm_eval = norm_eval

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters (referenced only by the commented-out head
        # below; the active layers build their norms via build_norm_layer)
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 3  # rgb
        out_channels = round_filters(32, self._global_params)  # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = build_norm_layer(norm_cfg, num_features=out_channels, postfix=0)[1]

        # Build blocks
        self._blocks = nn.ModuleList([])
        self.per_last_stage_idx = []  # cumulative index just past each stage's last block
        cum_idx = 0
        for block_args in self._blocks_args:
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params, norm_cfg))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params, norm_cfg))
            # record the cumulative block count at the end of this stage
            cum_idx += block_args.num_repeat
            self.per_last_stage_idx.append(cum_idx)

        # translate out_indices into absolute block indices (the -2 accounts
        # for the offset numbering scheme used by out_indices)
        self.out_stage_idx = []
        for stage in self.out_indices:
            self.out_stage_idx.append(self.per_last_stage_idx[stage - 2] - 1)

        # Head
        # in_channels = block_args.output_filters  # output of final block
        # out_channels = round_filters(1280, self._global_params)
        # self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        # self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Final linear layer
        # self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        # self._dropout = nn.Dropout(self._global_params.dropout_rate)
        # self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        self._swish = MemoryEfficientSwish()
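
    # A hedged sketch (the forward() of this example is not shown) of how
    # out_stage_idx is typically consumed in an EfficientNet backbone: run
    # the stem, walk self._blocks, and keep the feature map whenever the
    # block index appears in self.out_stage_idx. Assumes MBConvBlock.forward
    # accepts a single tensor; drop_connect handling is omitted for brevity.
    def _forward_sketch(self, x):
        x = self._swish(self._bn0(self._conv_stem(x)))
        outs = []
        for idx, block in enumerate(self._blocks):
            x = block(x)
            if idx in self.out_stage_idx:
                outs.append(x)
        return tuple(outs)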
Example #28
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   groups=1,
                   base_width=4,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   sac=None,
                   rfp=None):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              groups=groups,
              base_width=base_width,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb,
              sac=sac,
              rfp=rfp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=dilation,
                  groups=groups,
                  base_width=base_width,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb,
                  sac=sac))

    return nn.Sequential(*layers)
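
# A minimal usage sketch (not part of the original example): stacking four
# ResNet-50-style stages with this helper. `Bottleneck` is a hypothetical
# stand-in for the block class defined alongside make_res_layer
# (expansion == 4) that accepts the keyword arguments forwarded above
# (groups, base_width, sac, rfp).
inplanes, stages = 64, []
for i, num_blocks in enumerate([3, 4, 6, 3]):
    planes = 64 * 2 ** i
    stages.append(
        make_res_layer(Bottleneck,
                       inplanes,
                       planes,
                       num_blocks,
                       stride=1 if i == 0 else 2))
    inplanes = planes * Bottleneck.expansion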
Example #29
    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 norm_eval=True,
                 with_cp=False,
                 zero_init_residual=False):
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)
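
# A hedged usage sketch (not part of the original example): a typical
# `extra` config consumed by this __init__, following the common
# mmdetection HRNetV2-W32 settings. `num_modules`/`num_branches` are read
# by _make_stage/_make_transition_layer, which are not shown here.
extra = dict(
    stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
                num_blocks=(4, ), num_channels=(64, )),
    stage2=dict(num_modules=1, num_branches=2, block='BASIC',
                num_blocks=(4, 4), num_channels=(32, 64)),
    stage3=dict(num_modules=4, num_branches=3, block='BASIC',
                num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
    stage4=dict(num_modules=3, num_branches=4, block='BASIC',
                num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))
backbone = HRNet(extra=extra)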