Code example #1
 def __init__(self,
              num_input_features,
              growth_rate,
              bn_size,
              drop_rate,
              n_fold,
              memory_efficient=False):
     super(_DenseLayer, self).__init__()
     self.add_module(
         'norm1',
         build_norm_layer(norm_cfg, num_input_features, postfix=1)[1])
     self.add_module('relu1', nn.ReLU(inplace=True))
     self.add_module(
         'conv1',
         TSMConv(num_input_features,
                 bn_size * growth_rate,
                 kernel_size=1,
                 stride=1,
                 bias=False,
                 n_fold=n_fold))
     self.add_module(
         'norm2',
         build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1])
     self.add_module('relu2', nn.ReLU(inplace=True))
     self.add_module(
         'conv2',
         TSMConv(bn_size * growth_rate,
                 growth_rate,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 bias=False,
                 n_fold=n_fold))
     self.drop_rate = drop_rate
     self.memory_efficient = memory_efficient
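Note that `norm_cfg` is not a constructor argument here; in the source file it is a module-level config dict consumed by `build_norm_layer`. A minimal sketch of what it presumably looks like for this 3D TSM DenseNet (an assumption, not confirmed by the source):

# Assumed module-level config used by the `build_norm_layer` calls above;
# the exact type ('BN3d' here) is a guess for the 3D TSM variant.
norm_cfg = dict(type='BN3d', requires_grad=True)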
Code example #2
    def _make_fuse_layers(self):
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels[j],
                                             in_channels[i],
                                             kernel_size=1,
                                             stride=1,
                                             padding=0,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            nn.Upsample(scale_factor=2**(j - i),
                                        mode='nearest')))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[i],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(self.conv_cfg,
                                                     in_channels[j],
                                                     in_channels[j],
                                                     kernel_size=3,
                                                     stride=2,
                                                     padding=1,
                                                     bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)
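The fuse-layer geometry follows from branch j having spatial stride 2**j relative to branch 0: fusing a coarser branch (j > i) takes a 1x1 channel projection plus nearest upsampling by 2**(j - i), while a finer branch (j < i) takes i - j stride-2 3x3 convolutions. A small self-contained check of that arithmetic (illustrative only, not from the source):

# Print the resize each (input branch j -> output branch i) pair needs
# for a hypothetical 3-branch module.
num_branches = 3
for i in range(num_branches):
    for j in range(num_branches):
        if j > i:
            print(f'branch {j} -> {i}: upsample x{2 ** (j - i)}')
        elif j < i:
            print(f'branch {j} -> {i}: {i - j} stride-2 conv(s)')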
Code example #3
File: spnet.py  Project: zhwzhong/vega
def make_connection(inplanes,
                    planes,
                    conv_cfg=None,
                    norm_cfg=dict(type='BN')):
    """Make connection layer.

    :param inplanes: channels of current stage output
    :type inplanes: int

    :param planes: channels of current stage input
    :type planes: int

    :param conv_cfg: conv config
    :type conv_cfg: dict

    :param norm_cfg: norm config
    :type norm_cfg: dict

    :return: layer
    """
    layer = nn.Sequential(
        build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=1,
            bias=False),
        build_norm_layer(norm_cfg, planes)[1],
    )
    return layer
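Every call above indexes `[1]` because `build_norm_layer` returns a `(name, module)` tuple rather than a bare module. A minimal sketch, assuming the mmcv implementation of `build_norm_layer`:

from mmcv.cnn import build_norm_layer
import torch.nn as nn

name, layer = build_norm_layer(dict(type='BN'), 64, postfix=1)
print(name)                                # 'bn1': abbreviated type + postfix
print(isinstance(layer, nn.BatchNorm2d))   # True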
Code example #4
 def __init__(self, num_input_features, num_output_features):
     super(_Transition, self).__init__()
     self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1])
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module('conv', TSMConv(num_input_features, num_output_features,
                                       kernel_size=1, stride=1, bias=False, tsm=False))
     self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))  # padding=[0, 1, 1] left disabled in the source
Code example #5
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 inplanes,
                                 planes * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        layers.append(
            block(inplanes,
                  planes,
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(inplanes,
                      planes,
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
Code example #6
 def __init__(self, num_input_features, num_output_features):
     super().__init__()
     self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1])
     self.add_module('relu', nn.ReLU(inplace=True))
     self.add_module('conv', AlignShiftConv(num_input_features, num_output_features,
                                       kernel_size=1, stride=1, bias=False, alignshift=False))
     self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))
Code example #7
    def _build_upsample(self, inplanes, planes, norm_cfg=None):
        if self._with_deformable:
            mdcn = ModulatedDeformConvPack(inplanes,
                                           planes,
                                           3,
                                           stride=1,
                                           padding=1,
                                           dilation=1,
                                           deformable_groups=1)
        else:
            mdcn = nn.Conv2d(inplanes,
                             planes,
                             3,
                             stride=1,
                             padding=1,
                             dilation=1)
        up = nn.Upsample(scale_factor=2, mode='nearest')

        layers = [mdcn]
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)

        return nn.Sequential(*layers)
Code example #8
File: ct_fpn.py  Project: mrsempress/mmdetection
    def _make_deconv_layer(self, inplanes, num_layers, num_filters, num_kernels, norm_cfg=None):
        """

        Args:
            inplanes: in-channel num.
            num_layers: deconv layer num.
            num_filters: out channel of the deconv layers.
            num_kernels: int
            norm_cfg: dict()

        Returns:
            stacked deconv layers.
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'

        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = self._get_deconv_cfg(num_kernels[i])
            planes = num_filters[i]
            inplanes = inplanes if i == 0 else num_filters[i - 1]

            mdcn = ModulatedDeformConvPack(inplanes, planes, 3, stride=1,
                                           padding=1, dilation=1, deformable_groups=1)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=False)
            self.fill_up_weights(up)

            layers.append(mdcn)
            if norm_cfg:
                layers.append(build_norm_layer(norm_cfg, planes)[1])
            layers.append(nn.ReLU(inplace=True))

            layers.append(up)
            if norm_cfg:
                layers.append(build_norm_layer(norm_cfg, planes)[1])
            layers.append(nn.ReLU(inplace=True))

        return nn.Sequential(*layers)
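`_get_deconv_cfg` is not shown in this snippet. In the Simple Baselines / CenterNet convention this code appears to follow, it maps a deconv kernel size to the padding and output padding that give an exact 2x upsample; a sketch under that assumption:

def _get_deconv_cfg(deconv_kernel):
    # For a stride-2 ConvTranspose2d, output = 2 * input requires
    # kernel - 2 * padding + output_padding == 2.
    if deconv_kernel == 4:
        return 4, 1, 0
    elif deconv_kernel == 3:
        return 3, 1, 1
    elif deconv_kernel == 2:
        return 2, 0, 0
    raise ValueError(f'unsupported deconv kernel: {deconv_kernel}')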
Code example #9
def make_multigrid(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[]):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=blocks[0] * dilation,
              downsample=downsample,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb,
              gen_attention=gen_attention if
              (0 in gen_attention_blocks) else None))
    inplanes = planes * block.expansion
    for i in range(1, len(blocks)):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=blocks[i] * dilation,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb,
                  gen_attention=gen_attention if
                  (i in gen_attention_blocks) else None))

    return nn.Sequential(*layers)
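Unlike the plain res-layer builders, `blocks` here is a sequence of multi-grid unit rates (DeepLab-style), so block i runs at dilation `blocks[i] * dilation` and the stage depth is `len(blocks)`. A quick illustration with hypothetical values:

# Hypothetical unit rates (1, 2, 4) on a base dilation of 2.
blocks, dilation = (1, 2, 4), 2
print([b * dilation for b in blocks])   # [2, 4, 8]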
Code example #10
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   stride=1,
                   dilation=1,
                   groups=1,
                   base_width=4,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg,
                             inplanes,
                             planes * block.expansion,
                             kernel_size=1,
                             stride=stride,
                             bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(inplanes=inplanes,
              planes=planes,
              stride=stride,
              dilation=dilation,
              downsample=downsample,
              groups=groups,
              base_width=base_width,
              style=style,
              with_cp=with_cp,
              conv_cfg=conv_cfg,
              norm_cfg=norm_cfg,
              dcn=dcn,
              gcb=gcb))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes=inplanes,
                  planes=planes,
                  stride=1,
                  dilation=dilation,
                  groups=groups,
                  base_width=base_width,
                  style=style,
                  with_cp=with_cp,
                  conv_cfg=conv_cfg,
                  norm_cfg=norm_cfg,
                  dcn=dcn,
                  gcb=gcb))

    return nn.Sequential(*layers)
Code example #11
File: spnet.py  Project: zeyefkey/vega
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None,
                 groups=1,
                 base_width=4):
        """Initialize."""
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."
        if groups != 1:
            raise ValueError('BasicBlock only supports groups=1')

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Code example #12
    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             num_channels_pre_layer[i],
                                             num_channels_cur_layer[i],
                                             kernel_size=3,
                                             stride=1,
                                             padding=1,
                                             bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(self.conv_cfg,
                                             in_channels,
                                             out_channels,
                                             kernel_size=3,
                                             stride=2,
                                             padding=1,
                                             bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)
Code example #13
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      planes,
                                      3,
                                      stride=stride,
                                      padding=dilation,
                                      dilation=dilation,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      planes,
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Code example #14
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()

        self.norm1_name, norm1 = build_norm_layer(norm_cfg,
                                                  int(planes / 2),
                                                  postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)

        self.conv1 = build_conv_layer(conv_cfg,
                                      inplanes,
                                      int(planes / 2),
                                      1,
                                      stride=stride,
                                      padding=0,
                                      dilation=dilation,
                                      bias=False)
        self.add_module(self.norm1_name, norm1)
        self.relu1 = nn.LeakyReLU(0.1)
        self.conv2 = build_conv_layer(conv_cfg,
                                      int(planes / 2),
                                      planes,
                                      3,
                                      padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, norm2)

        self.relu2 = nn.LeakyReLU(0.1)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        assert not with_cp
Code example #15
 def _make_stem_layer(self):
     self.conv1 = build_conv_layer(self.conv_cfg,
                                   3,
                                   64,
                                   kernel_size=7,
                                   stride=2,
                                   padding=3,
                                   bias=False)
     self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
     self.add_module(self.norm1_name, norm1)
     self.relu = nn.ReLU(inplace=True)
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
Code example #16
File: spnet.py  Project: zhwzhong/vega
 def _make_stem_layer2(self, base_channel=64):
     """Make stem layer 2, after reignite."""
     self.conv1 = nn.Conv2d(
         3,
         base_channel // 2,
         kernel_size=3,
         stride=2,
         padding=1,
         bias=False)
     self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, base_channel // 2, postfix=1)
     self.add_module(self.norm1_name, norm1)
     self.conv2 = nn.Conv2d(
         base_channel // 2,
         base_channel,
         kernel_size=3,
         stride=2,
         padding=1,
         bias=False)
     self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, base_channel, postfix=2)
     self.add_module(self.norm2_name, norm2)
     self.relu = nn.ReLU(inplace=True)
Code example #17
 def _make_stem_layer(self):
     self.conv1 = build_conv_layer(self.conv_cfg,
                                   3,
                                   self.inplanes,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
     self.norm1_name, norm1 = build_norm_layer(self.norm_cfg,
                                               self.inplanes,
                                               postfix=1)
     self.add_module(self.norm1_name, norm1)
     self.relu1 = nn.LeakyReLU(0.1)
Code example #18
    def build_upsample(self, inplanes, planes, norm_cfg=None):
        mdcn = ModulatedDeformConvPack(inplanes, planes, 3, stride=1,
                                       padding=1, dilation=1, deformable_groups=1)
        up = nn.UpsamplingBilinear2d(scale_factor=2)

        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)

        return nn.Sequential(*layers)
Code example #19
File: ttf_head_full.py  Project: Leotju/ttfnet
 def build_dcn(self, inplanes, planes, norm_cfg=None):
     mdcn = ModulatedDeformConvPack(inplanes,
                                    planes,
                                    3,
                                    stride=1,
                                    padding=1,
                                    dilation=1,
                                    deformable_groups=1)
     layers = []
     layers.append(mdcn)
     if norm_cfg:
         layers.append(build_norm_layer(norm_cfg, planes)[1])
     layers.append(nn.ReLU(inplace=True))
     return nn.Sequential(*layers)
Code example #20
    def __init__(self, in_channels, out_channels, norm_cfg=dict(type='BN')):
        mdcn = ModulatedDeformConvPack(
            in_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            dilation=1,
            deformable_groups=1)
        up = nn.UpsamplingBilinear2d(scale_factor=2)

        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, out_channels)[1])
        layers.append(nn.ReLU(inplace=True))
        layers.append(up)
        super(UpsamplingLayers, self).__init__(*layers)
Code example #21
File: ttfx_fpn.py  Project: mrsempress/mmdetection
 def __init__(self,
              in_channels,
              out_channels,
              conv_cfg=None,
              norm_cfg=dict(type='BN'),
              no_upsample=False,
              vallina=False,
              offset_mean=False):
     if vallina:
         if isinstance(vallina, int):
             padding = int((vallina - 1) / 2)
             dila = padding
             mdcn = nn.Conv2d(
                 in_channels,
                 out_channels,
                 3,
                 padding=padding,
                 dilation=dila)
         else:
             mdcn = nn.Conv2d(
                 in_channels,
                 out_channels,
                 3,
                 padding=1)
     elif conv_cfg:
         mdcn = build_conv_layer(conv_cfg, in_channels, out_channels)
     else:
         mdcn = ModulatedDeformConvPack(
             in_channels,
             out_channels,
             3,
             offset_mean=offset_mean,
             stride=1,
             padding=1,
             dilation=1,
             deformable_groups=1)
     layers = []
     layers.append(mdcn)
     if norm_cfg:
         layers.append(build_norm_layer(norm_cfg, out_channels)[1])
     layers.append(nn.ReLU(inplace=True))
     if not no_upsample:
         layers.append(nn.UpsamplingBilinear2d(scale_factor=2))
     super(UpsamplingLayers, self).__init__(*layers)
Code example #22
    def build_upsample(self,
                       inplanes,
                       planes,
                       norm_cfg=None,
                       no_upsample=False):
        if self.upsample_vanilla_conv:
            if isinstance(self.upsample_vanilla_conv, int):
                padding = int((self.upsample_vanilla_conv - 1) / 2)
                dila = padding
                mdcn = nn.Conv2d(inplanes,
                                 planes,
                                 3,
                                 stride=1,
                                 padding=padding,
                                 dilation=dila)
            else:
                mdcn = nn.Conv2d(inplanes, planes, 3, stride=1, padding=1)
        elif self.upsample_multiscale_conv:
            mdcn = build_conv_layer(dict(type='MultiScaleConv'), inplanes,
                                    planes)
        elif self.use_trident:
            mdcn = build_conv_layer(dict(type='TriConv'), inplanes, planes)
        elif self.up_conv_cfg:
            mdcn = build_conv_layer(self.up_conv_cfg, inplanes, planes)
        else:
            mdcn = ModulatedDeformConvPack(inplanes,
                                           planes,
                                           3,
                                           offset_mean=self.dcn_mean,
                                           stride=1,
                                           padding=1,
                                           dilation=1,
                                           deformable_groups=1)
        layers = []
        layers.append(mdcn)
        if norm_cfg:
            layers.append(build_norm_layer(norm_cfg, planes)[1])
        layers.append(nn.ReLU(inplace=True))
        if not no_upsample:
            up = nn.UpsamplingBilinear2d(scale_factor=2)
            layers.append(up)

        return nn.Sequential(*layers)
Code example #23
def common_conv2d(inplanes,
                  planes,
                  kernel,
                  padding,
                  stride,
                  norm_cfg=dict(type='BN')):
    cell = OrderedDict()
    cell['conv'] = nn.Conv2d(inplanes,
                             planes,
                             kernel_size=kernel,
                             stride=stride,
                             padding=padding,
                             bias=False)
    if norm_cfg:
        norm_name, norm = build_norm_layer(norm_cfg, planes)
        cell[norm_name] = norm

    cell['leakyrelu'] = nn.LeakyReLU(0.1)
    cell = nn.Sequential(cell)
    return cell
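`nn.Sequential` accepts an `OrderedDict`, so each submodule keeps its dict key as its name, which is what `common_conv2d` relies on to register the norm layer under the name returned by `build_norm_layer`. A hedged usage example (hypothetical channel sizes, assuming the definition above and mmcv are importable):

import torch

cell = common_conv2d(3, 32, kernel=3, padding=1, stride=1)
print(cell)                                    # submodules named 'conv', 'bn', 'leakyrelu'
print(cell(torch.randn(1, 3, 64, 64)).shape)   # torch.Size([1, 32, 64, 64])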
Code example #24
File: cd_mask_rcnn.py  Project: LokeZhou/mydetection
    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 mask_roi_extractor,
                 mask_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 shared_head=None,
                 pretrained=None):
        super(CDMaskRCNN, self).__init__(
            backbone=backbone,
            neck=neck,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained)

        conv_layer = []
        for i in range(len(self.backbone.out_indices)):
            layer = nn.Sequential(
                build_conv_layer(
                    None,
                    1,
                    256 * 2**i,
                    kernel_size=1,
                    stride=1,
                    bias=False),
                build_norm_layer(dict(type='BN', requires_grad=True),
                                 256 * 2**i)[1],
                nn.ReLU(inplace=True))
            conv_layer.append(layer)
        self.conv = nn.Sequential(*conv_layer)
Code example #25
    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        downsample = None
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(self.conv_cfg,
                                 self.in_channels[branch_index],
                                 num_channels[branch_index] * block.expansion,
                                 kernel_size=1,
                                 stride=stride,
                                 bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(self.in_channels[branch_index],
                  num_channels[branch_index],
                  stride,
                  downsample=downsample,
                  with_cp=self.with_cp,
                  norm_cfg=self.norm_cfg,
                  conv_cfg=self.conv_cfg))
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(self.in_channels[branch_index],
                      num_channels[branch_index],
                      with_cp=self.with_cp,
                      norm_cfg=self.norm_cfg,
                      conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)
Code example #26
 def __init__(self,
              in_channels,
              out_channels,
              norm_cfg=dict(type='BN'),
              no_upsample=False,
              use_tri=False):
     if use_tri:
         mdcn = TridentConv2d(in_channels, out_channels)
     else:
         mdcn = ModulatedDeformConvPack(in_channels,
                                        out_channels,
                                        3,
                                        stride=1,
                                        padding=1,
                                        dilation=1,
                                        deformable_groups=1)
     layers = []
     layers.append(mdcn)
     if norm_cfg:
         layers.append(build_norm_layer(norm_cfg, out_channels)[1])
     layers.append(nn.ReLU(inplace=True))
     if not no_upsample:
         layers.append(nn.UpsamplingBilinear2d(scale_factor=2))
     super(UpsamplingLayers, self).__init__(*layers)
Code example #27
    def __init__(self,
                 out_dim=256,
                 n_cts=3,
                 fpn_finest_layer=1,
                 memory_efficient=True,
                 n_fold=8):
        super().__init__()
        self.depth = 121
        self.feature_upsample = True
        self.fpn_finest_layer = fpn_finest_layer
        self.out_dim = out_dim
        self.n_cts = n_cts
        self.mid_ct = n_cts//2
        self.n_fold = n_fold
        assert self.depth in [121]
        if self.depth == 121:
            num_init_features = 64
            growth_rate = 32
            block_config = (6, 12, 24)
            self.in_dim = [64, 256, 512, 1024]
        bn_size = 4
        drop_rate = 0

        # First convolution
        self.conv0 = TSMConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, tsm=False)
        self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1]
        self.relu0 = nn.ReLU(inplace=True)
        self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1])

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate,
                                n_fold=self.n_fold, memory_efficient=memory_efficient)
            self.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            reductionz = _Reduction_z(num_features, self.n_cts)
            self.add_module('reductionz%d' % (i + 1), reductionz)
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        # self.add_module('norm5', nn.BatchNorm2d(num_features))

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

        if self.feature_upsample:
            for p in range(4, self.fpn_finest_layer - 1, -1):
                layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1)
                name = 'lateral%d' % p
                self.add_module(name, layer)

                nn.init.kaiming_uniform_(layer.weight, a=1)
                nn.init.constant_(layer.bias, 0)
        self.init_weights()
Code example #28
File: spnet.py  Project: zhwzhong/vega
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None,
                 groups=1,
                 base_width=4):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / 64)) * groups

        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, width, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, width, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                groups=groups,
                bias=False)
        else:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            self.deformable_groups = dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                width,  # conv2's input has `width` channels; the source's `planes` breaks when groups != 1
                self.deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation)
            self.conv2 = conv_op(
                width,
                width,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                groups=groups,
                deformable_groups=self.deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            width,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(
                inplanes=gcb_inplanes,
                **gcb
            )

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)
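The 18 vs. 27 `offset_channels` split follows from the deformable-conv parameterization: plain `DeformConv` predicts an (x, y) offset per 3x3 sampling point, while `ModulatedDeformConv` additionally predicts a per-point modulation mask:

kh, kw = 3, 3
print(2 * kh * kw)   # 18: (dx, dy) per sampling point -> DeformConv
print(3 * kh * kw)   # 27: (dx, dy, mask) per point -> ModulatedDeformConv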
Code example #29
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            deformable_groups = dcn.get('deformable_groups', 1)
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                planes,  # offsets are predicted from conv2's input, which has `planes` channels
                deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation)
            self.conv2 = conv_op(
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                deformable_groups=deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
Code example #30
File: spnet.py  Project: zhwzhong/vega
def make_res_layer(block,
                   inplanes,
                   planes,
                   arch,
                   stride=1,
                   dilation=1,
                   style='pytorch',
                   with_cp=False,
                   conv_cfg=None,
                   norm_cfg=dict(type='BN'),
                   dcn=None,
                   gcb=None,
                   gen_attention=None,
                   gen_attention_blocks=[],
                   groups=1,
                   base_width=4):
    """Make resnet layer.

    :param block: block function

    :param inplanes: input feature map channel num
    :type inplanes: int

    :param planes: output feature map channel num
    :type planes: int

    :param arch: model arch
    :type arch: str

    :param stride: stride
    :type stride: int

    :param dilation: dilation
    :type dilation: int

    :param style: style
    :type style: str

    :param with_cp: with cp
    :type with_cp: bool

    :param conv_cfg: conv config
    :type conv_cfg: dict

    :param norm_cfg: norm config
    :type norm_cfg: dict

    :param dcn: deformable conv network

    :param gcb: gcb

    :param gen_attention: gen attention

    :param gen_attention_blocks: gen attention block

    :param groups: groups
    :type groups: int

    :param base_width: base width
    :type base_width: int

    :return: layer
    """
    layers = []
    for i, layer_type in enumerate(arch):
        downsample = None
        stride = stride if i == 0 else 1
        if layer_type == 2:
            planes *= 2
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1])
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                dilation=dilation,
                downsample=downsample,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                groups=groups,
                base_width=base_width,
                gen_attention=gen_attention if
                (i in gen_attention_blocks) else None))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)
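The docstring calls `arch` a `str`, but the comparison `layer_type == 2` only matches ints, so `arch` is presumably a sequence of ints in which an entry of 2 doubles `planes` before that block is built; the channel-doubling points of the searched stage are thus encoded in `arch` itself. A worked example with a hypothetical arch:

# Hypothetical searched stage: entries of 2 double the width.
arch, planes, expansion = (1, 2, 1, 2), 64, 4
for i, layer_type in enumerate(arch):
    if layer_type == 2:
        planes *= 2
    print(f'block {i}: planes={planes}, out_channels={planes * expansion}')
# block 0: planes=64,  out_channels=256
# block 1: planes=128, out_channels=512
# block 2: planes=128, out_channels=512
# block 3: planes=256, out_channels=1024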