Example #1
def downsample_avg(in_chs,
                   out_chs,
                   kernel_size,
                   stride=1,
                   dilation=1,
                   norm_layer=None):
    """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
    norm_layer = norm_layer or nn.BatchNorm2d
    # the pool carries the stride only when dilation == 1
    avg_stride = stride if dilation == 1 else 1
    pool = nn.Identity()
    if stride > 1 or dilation > 1:
        # 'same'-padded pooling when the stride was absorbed into dilation
        avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
        pool = avg_pool_fn(2,
                           avg_stride,
                           ceil_mode=True,
                           count_include_pad=False)
    return nn.Sequential(*[
        pool,
        ConvBnAct(in_chs,
                  out_chs,
                  1,
                  stride=1,
                  norm_layer=norm_layer,
                  act_layer=None)
    ])
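A minimal usage sketch (my addition, not part of the original snippet): it assumes ConvBnAct and AvgPool2dSame resolve to the timm layer helpers these examples appear to be drawn from (newer timm releases rename ConvBnAct to ConvNormAct).

import torch
import torch.nn as nn
from timm.models.layers import AvgPool2dSame, ConvBnAct  # assumed import path

shortcut = downsample_avg(in_chs=64, out_chs=128, kernel_size=1, stride=2)
x = torch.randn(1, 64, 56, 56)
print(shortcut(x).shape)  # torch.Size([1, 128, 28, 28])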
Example #2
    def __init__(self,
                 in_chs,
                 out_chs,
                 dilation=1,
                 bottle_ratio=0.5,
                 groups=1,
                 act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d,
                 attn_layer=None,
                 aa_layer=None,
                 drop_block=None,
                 drop_path=None):
        super(DarkBlock_ACON, self).__init__()
        mid_chs = int(round(out_chs * bottle_ratio))
        ckwargs = dict(act_layer=act_layer,
                       norm_layer=norm_layer,
                       aa_layer=aa_layer,
                       drop_block=drop_block)
        self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs)
        # conv2 swaps the standard ConvBnAct for an ACON-activated 3x3 conv
        self.conv2 = Conv3x3_ACON(mid_chs,
                                  out_chs,
                                  kernel_size=3,
                                  dilation=dilation,
                                  groups=groups)
        self.attn = create_attn(attn_layer, channels=out_chs)
        self.drop_path = drop_path
Example #3
def downsample_conv(
        in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None):
    norm_layer = norm_layer or nn.BatchNorm2d
    # collapse to a plain 1x1 projection when no spatial change is requested
    kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
    dilation = dilation if kernel_size > 1 else 1
    return ConvBnAct(
        in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None)
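For contrast, a small hedged sketch of the branch logic above: when stride and dilation are both 1 the kernel collapses to 1x1; otherwise the requested kernel and stride are kept.

proj = downsample_conv(64, 128, kernel_size=3)               # no spatial change: acts as a 1x1 projection
strided = downsample_conv(64, 128, kernel_size=3, stride=2)  # keeps the 3x3 kernel with stride 2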
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 bottleneck_factor=1,
                 group_width=1,
                 se_ratio=0.0):
        super(Bottleneck, self).__init__()
        self.use_se = se_ratio > 0.0
        # this variant only supports factor 1, i.e. mid_channels == out_channels
        assert bottleneck_factor == 1

        mid_channels = out_channels // bottleneck_factor
        groups = mid_channels // group_width

        self.conv1 = ConvBnAct(in_channels, mid_channels, kernel_size=1)
        self.conv2 = ConvBnAct(mid_channels,
                               mid_channels,
                               kernel_size=3,
                               stride=stride,
                               dilation=1,
                               groups=groups)
        if self.use_se:
            self.se = SEModule(mid_channels,
                               reduction_channels=int(
                                   round(in_channels * se_ratio)))
        self.conv3 = ConvBnAct(mid_channels,
                               out_channels,
                               kernel_size=1,
                               act_layer=None)
        self.act3 = nn.ReLU(inplace=True)

        if (in_channels != out_channels) or (stride != 1):
            self.downsample = ConvBnAct(in_channels=in_channels,
                                        out_channels=out_channels,
                                        kernel_size=1,
                                        stride=stride,
                                        dilation=1,
                                        norm_layer=nn.BatchNorm2d,
                                        act_layer=None)
        else:
            self.downsample = None
Example #5
    def __init__(self,
                 in_chs,
                 out_chs,
                 stride=1,
                 dilation=1,
                 bottleneck_ratio=1,
                 group_width=1,
                 se_ratio=0.25,
                 downsample=None,
                 act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d,
                 aa_layer=None,
                 drop_block=None,
                 drop_path=None):
        super(Bottleneck, self).__init__()
        bottleneck_chs = int(round(out_chs * bottleneck_ratio))
        groups = bottleneck_chs // group_width

        cargs = dict(act_layer=act_layer,
                     norm_layer=norm_layer,
                     aa_layer=aa_layer,
                     drop_block=drop_block)
        self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)
        self.conv2 = ConvBnAct(bottleneck_chs,
                               bottleneck_chs,
                               kernel_size=3,
                               stride=stride,
                               dilation=dilation,
                               groups=groups,
                               **cargs)
        if se_ratio:
            # per the RegNet design, SE width is derived from the block's input channels
            se_channels = int(round(in_chs * se_ratio))
            self.se = SEModule(bottleneck_chs, reduction_channels=se_channels)
        else:
            self.se = None
        cargs['act_layer'] = None
        self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs)
        self.act3 = act_layer(inplace=True)
        self.downsample = downsample
        self.drop_path = drop_path
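The snippet shows only the constructor; below is a hedged sketch of the forward pass these fields imply, following the usual residual wiring of timm-style bottlenecks (not copied from the source):

    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.se is not None:
            x = self.se(x)
        x = self.conv3(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            # project the identity branch to match shape
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act3(x)
        return x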
Example #6
    def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.,
                 drop_path_rate=0., zero_init_last_bn=True):
        super().__init__()
        # TODO add drop block, drop path, anti-aliasing, custom bn/act args
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        assert output_stride in (8, 16, 32)

        # Construct the stem
        stem_width = cfg['stem_width']
        self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2)
        self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]

        # Construct the stages
        prev_width = stem_width
        curr_stride = 2
        stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate)
        se_ratio = cfg['se_ratio']
        for i, stage_args in enumerate(stage_params):
            stage_name = "s{}".format(i + 1)
            self.add_module(stage_name, RegStage(prev_width, se_ratio=se_ratio, **stage_args))
            prev_width = stage_args['out_chs']
            curr_stride *= stage_args['stride']
            self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]

        # Construct the head
        self.num_features = prev_width
        self.head = ClassifierHead(
            in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, mean=0.0, std=0.01)
                nn.init.zeros_(m.bias)
        if zero_init_last_bn:
            for m in self.modules():
                if hasattr(m, 'zero_init_last_bn'):
                    m.zero_init_last_bn()
Example #7
    def __init__(self,
                 cfg,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super().__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()

        stem_width = 32

        # Construct the stem
        self.features.add_module(
            "stem",
            ConvBnAct(in_channels=in_channels,
                      out_channels=stem_width,
                      kernel_size=3,
                      stride=2))

        # Construct the stages
        in_channels = stem_width
        stage_params = self._get_stage_params(cfg)
        se_ratio = cfg['se_ratio']
        for i, stage_args in enumerate(stage_params):
            stage_name = "s{}".format(i + 1)
            self.features.add_module(
                stage_name,
                RegStage(in_channels=in_channels,
                         se_ratio=se_ratio,
                         **stage_args))
            in_channels = stage_args['out_channels']

        self.features.add_module("global_pool",
                                 nn.AdaptiveAvgPool2d(output_size=1))
        self.fc = nn.Linear(in_features=in_channels,
                            out_features=num_classes,
                            bias=True)
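No forward is defined in this snippet either; a hedged sketch of what the constructor implies (imgclsmob-style: the features stack already ends in global pooling, so only flatten and the linear head remain):

    def forward(self, x):
        x = self.features(x)       # stem, stages, global_pool
        x = x.view(x.size(0), -1)  # flatten pooled features
        x = self.fc(x)
        return x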