Example #1
File: rexnet.py  Project: pgsrv/Holocron
    def __init__(self,
                 in_channels,
                 channels,
                 t,
                 stride,
                 use_se=True,
                 se_ratio=12,
                 act_layer=None,
                 norm_layer=None,
                 drop_layer=None):
        super().__init__()

        if act_layer is None:
            act_layer = nn.ReLU6(inplace=True)

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        # A (partial) residual connection is only possible when the resolution is preserved
        # and the input channels fit within the output channels
        self.use_shortcut = stride == 1 and in_channels <= channels
        self.in_channels = in_channels
        self.out_channels = channels

        _layers = []
        # 1x1 pointwise expansion (with SiLU/Swish) when the expansion factor t > 1
        if t != 1:
            dw_channels = in_channels * t
            _layers.extend(
                conv_sequence(in_channels,
                              dw_channels,
                              SiLU(),
                              norm_layer,
                              drop_layer,
                              kernel_size=1,
                              stride=1,
                              bias=False))
        else:
            dw_channels = in_channels

        # 3x3 depthwise convolution (groups == channels); its activation is applied after the SE block
        _layers.extend(
            conv_sequence(dw_channels,
                          dw_channels,
                          None,
                          norm_layer,
                          drop_layer,
                          kernel_size=3,
                          stride=stride,
                          padding=1,
                          bias=False,
                          groups=dw_channels))

        # Optional squeeze-and-excitation block on the expanded channels
        if use_se:
            _layers.append(
                SEBlock(dw_channels, se_ratio, act_layer, norm_layer,
                        drop_layer))

        # Activation (ReLU6 by default), then a linear 1x1 projection to the output channels
        _layers.append(act_layer)
        _layers.extend(
            conv_sequence(dw_channels,
                          channels,
                          None,
                          norm_layer,
                          drop_layer,
                          kernel_size=1,
                          stride=1,
                          bias=False))
        self.conv = nn.Sequential(*_layers)
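A minimal usage sketch of the block above, assuming it is Holocron's ReXBlock class and that the import path below is valid (both are assumptions inferred from the file shown, rexnet.py in pgsrv/Holocron):

import torch

from holocron.models.rexnet import ReXBlock  # assumed import path

# Expand 16 -> 96 channels (t=6), stride-2 depthwise conv, then project to 72 output channels
block = ReXBlock(in_channels=16, channels=72, t=6, stride=2, use_se=True, se_ratio=12)

x = torch.rand(2, 16, 56, 56)
out = block.conv(x)  # self.conv is assembled in the __init__ shown above
print(out.shape)     # torch.Size([2, 72, 28, 28]) -- the stride-2 depthwise halves the resolution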
Example #2
File: rexnet.py  Project: pgsrv/Holocron
    def __init__(self,
                 width_mult=1.0,
                 depth_mult=1.0,
                 num_classes=1000,
                 in_channels=3,
                 in_planes=16,
                 final_planes=180,
                 use_se=True,
                 se_ratio=12,
                 dropout_ratio=0.2,
                 bn_momentum=0.9,
                 act_layer=None,
                 norm_layer=None,
                 drop_layer=None):
        """Mostly adapted from https://github.com/clovaai/rexnet/blob/master/rexnetv1.py"""
        super().__init__()

        if act_layer is None:
            act_layer = SiLU()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        # Per-stage block counts and strides; depth_mult scales the number of blocks per stage
        num_blocks = [1, 2, 2, 3, 3, 5]
        strides = [1, 2, 2, 2, 1, 2]
        num_blocks = [ceil(element * depth_mult) for element in num_blocks]
        # Expand the strides so that only the first block of each stage downsamples
        strides = sum([[element] + [1] * (num_blocks[idx] - 1)
                       for idx, element in enumerate(strides)], [])
        depth = sum(num_blocks)

        # For width_mult < 1, pre-divide the base widths so the stem and first block
        # keep their default sizes after the width_mult scaling below
        stem_channel = 32 / width_mult if width_mult < 1.0 else 32
        inplanes = in_planes / width_mult if width_mult < 1.0 else in_planes

        # The following channel configuration is a simple instance to make each layer become an expand layer
        chans = [int(round(width_mult * stem_channel))]
        chans.extend([
            int(round(width_mult * (inplanes + idx * final_planes / depth)))
            for idx in range(depth)
        ])

        # Squeeze-and-excitation is disabled for the first two stages
        ses = [False] * (num_blocks[0] + num_blocks[1]) + [use_se] * sum(
            num_blocks[2:])

        # Stem: 3x3 stride-2 convolution
        _layers = conv_sequence(in_channels,
                                chans[0],
                                act_layer,
                                norm_layer,
                                drop_layer,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False)

        # The first block keeps t=1 (no expansion); every following block uses an expansion factor of 6
        t = 1
        for in_c, c, s, se in zip(chans[:-1], chans[1:], strides, ses):
            _layers.append(
                ReXBlock(in_channels=in_c,
                         channels=c,
                         t=t,
                         stride=s,
                         use_se=se,
                         se_ratio=se_ratio))
            t = 6

        # Penultimate 1x1 convolution widening to int(1280 * width_mult) channels
        pen_channels = int(width_mult * 1280)
        _layers.extend(
            conv_sequence(chans[-1],
                          pen_channels,
                          act_layer,
                          norm_layer,
                          drop_layer,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          bias=False))

        # Register the assembled modules on the (presumably nn.Sequential) parent:
        # features -> global average pooling -> dropout + linear classification head
        super().__init__(
            OrderedDict([('features', nn.Sequential(*_layers)),
                         ('pool', GlobalAvgPool2d(flatten=True)),
                         ('head',
                          nn.Sequential(nn.Dropout(dropout_ratio),
                                        nn.Linear(pen_channels,
                                                  num_classes)))]))
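Similarly, a rough sketch of building and running the full network, assuming this constructor belongs to a ReXNet class deriving from nn.Sequential (as the final super().__init__(OrderedDict(...)) call suggests); the class name and import path below are assumptions:

import torch

from holocron.models.rexnet import ReXNet  # assumed import path / class name

# ReXNet-1.0x: 32-channel stem, linearly growing block widths, 1280-channel penultimate conv
model = ReXNet(width_mult=1.0, depth_mult=1.0, num_classes=1000)
model.eval()

x = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)  # features -> global average pooling -> dropout + linear head
print(logits.shape)    # torch.Size([1, 1000])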