# Example 1
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 expansion_factor,
                 se_ratio=None,
                 data_format='channels_last',
                 name='mbconv_block'):
        """Build an MBConv (inverted-residual) block.

        Expands channels with a pointwise conv, filters with a depthwise
        conv, optionally recalibrates with squeeze-and-excitation, then
        projects back down with a second pointwise conv.
        """
        assert kernel_size in (3, 5)
        super(MBConv, self).__init__(name=name)

        mid_channels = in_channels * expansion_factor
        bn_axis = 1 if data_format != 'channels_last' else -1
        # 'same'-style padding for the supported 3x3 / 5x5 kernels.
        pad = kernel_size // 2
        # The skip connection is valid only when input/output shapes match.
        self.residual = (strides == 1) and (in_channels == out_channels)

        self.conv1x1_expand = tf.keras.Sequential([
            Conv2d(in_channels,
                   mid_channels,
                   kernel_size=1,
                   strides=1,
                   padding=0,
                   use_bias=False,
                   data_format=data_format,
                   name=name + '/expand/conv1x1'),
            BatchNormalization(axis=bn_axis, name=name + '/expand/bn'),
            ReLU(name=name + '/expand/activ')
        ])

        self.dwconv = tf.keras.Sequential([
            Conv2d(mid_channels,
                   mid_channels,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding=pad,
                   groups=mid_channels,  # depthwise: one group per channel
                   use_bias=False,
                   data_format=data_format,
                   name=name + '/dwconv/dwconv'),
            BatchNormalization(axis=bn_axis, name=name + '/dwconv/bn'),
            ReLU(name=name + '/dwconv/activ')
        ])

        if se_ratio:
            self.se = SEBlock(mid_channels,
                              int(mid_channels * se_ratio),
                              data_format,
                              name=name + '/se')
        else:
            self.se = None

        self.conv1x1_project = tf.keras.Sequential([
            Conv2d(mid_channels,
                   out_channels,
                   kernel_size=1,
                   strides=1,
                   padding=0,
                   use_bias=False,
                   data_format=data_format,
                   name=name + '/project/conv1x1'),
            BatchNormalization(axis=bn_axis, name=name + '/project/bn')
        ])
# Example 2
    def __init__(self,
                 in_channels,
                 out_channels,
                 n_groups=2,
                 se_reduction=None,
                 data_format='channels_last',
                 name='shufflenetv2_downsample_unit'):
        """Build a ShuffleNetV2 stride-2 (downsampling) unit.

        Both branches halve spatial resolution; their outputs are
        concatenated along the channel axis and channel-shuffled.
        """
        super(ShuffleNetV2DownsampleUnit, self).__init__(name=name)
        self.channel_axis = 1 if data_format != 'channels_last' else -1

        mid_channels = out_channels // 2
        assert mid_channels % 2 == 0
        # The main branch emits the channels the shortcut does not carry.
        main_out_channels = out_channels - in_channels

        self.short_cut_branch = tf.keras.Sequential([
            DWConv3x3_Block(in_channels,
                            strides=2,
                            activ=False,
                            data_format=data_format,
                            name=name + '/shortcut_dwconv3x3'),
            Conv1x1_Block(in_channels,
                          in_channels,
                          activ=True,
                          data_format=data_format,
                          name=name + '/shortcut_conv1x1')
        ])
        self.branch = tf.keras.Sequential([
            Conv1x1_Block(in_channels,
                          mid_channels,
                          activ=True,
                          data_format=data_format,
                          name=name + '/branch_conv1x1_0'),
            DWConv3x3_Block(mid_channels,
                            strides=2,
                            activ=False,
                            data_format=data_format,
                            name=name + '/branch_dwconv3x3'),
            Conv1x1_Block(mid_channels,
                          main_out_channels,
                          activ=True,
                          data_format=data_format,
                          name=name + '/branch_conv1x1_1')
        ])
        if se_reduction:
            self.se = SEBlock(main_out_channels,
                              se_reduction,
                              data_format,
                              name=name + '/se')
        else:
            self.se = None

        self.concat = Concatenate(axis=self.channel_axis,
                                  name=name + '/concat')
        self.shuffle = ChannelShuffle(data_format,
                                      n_groups,
                                      name=name + '/channel_shuffle')
# Example 3
 def __init__(self, out_channels, use_bn, increase, scales):
     """Per-scale pre-regression heads: two 3x3 conv blocks plus SE each."""
     super(Features, self).__init__()
     self.scales = scales
     bias = not use_bn
     heads = []
     # One head per scale; scale i receives `i * increase` extra input channels.
     for i in range(scales):
         heads.append(nn.Sequential(
             conv3x3_block(in_channels=out_channels + i * increase,
                           out_channels=out_channels,
                           bias=bias,
                           use_bn=use_bn),
             conv3x3_block(in_channels=out_channels,
                           out_channels=out_channels,
                           bias=bias,
                           use_bn=use_bn),
             SEBlock(channels=out_channels),
         ))
     self.before_regress = nn.ModuleList(heads)
    def __init__(self,
                 in_channels,
                 channels,
                 strides=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 se_reduction=None,
                 data_format='channels_last',
                 name='residual_bottleneck_block'):
        """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with optional SE.

        Args:
            in_channels: number of input channels.
            channels: base width; the output has `channels * self.expansion`
                channels (`expansion` is a class attribute).
            strides: stride of the 3x3 convolution.
            downsample: optional layer projecting the identity branch.
            groups: group count for the 3x3 convolution.
            base_width: per-group base width (ResNeXt-style scaling).
            dilation: dilation rate of the 3x3 convolution.
            se_reduction: SE reduction ratio; SE is disabled when falsy.
            data_format: 'channels_last' or 'channels_first'.
            name: layer-name prefix for all sublayers.
        """
        super(ResBottleneckBlock, self).__init__()
        channel_axis = -1 if data_format == 'channels_last' else 1
        mid_channels = int(channels * (base_width / 64.)) * groups

        self.conv1 = conv1x1(in_channels, mid_channels, name=name + '/conv1')
        self.bn1 = BatchNormalization(axis=channel_axis, name=name + '/bn1')

        self.conv2 = conv3x3(mid_channels,
                             mid_channels,
                             strides,
                             dilation,
                             groups,
                             # BUG FIX: was name='/conv2', which dropped the
                             # block prefix used by every sibling layer.
                             name=name + '/conv2')
        self.bn2 = BatchNormalization(axis=channel_axis, name=name + '/bn2')

        self.conv3 = conv1x1(mid_channels,
                             channels * self.expansion,
                             name=name + '/conv3')
        self.bn3 = BatchNormalization(axis=channel_axis, name=name + '/bn3')

        self.activation = ReLU()
        self.downsample = downsample
        self.se = None
        if se_reduction:
            self.se = SEBlock(channels=channels * self.expansion,
                              reduction=se_reduction,
                              data_format=data_format,
                              name=name + '/se')
# Example 5
    def __init__(self,
                 channels,
                 n_groups=2,
                 se_reduction=None,
                 residual=False,
                 data_format='channels_last',
                 name='shufflenetv2_basic_unit'):
        """Build a ShuffleNetV2 stride-1 unit (split, transform, shuffle)."""
        assert channels % 2 == 0
        super(ShuffleNetV2BasicUnit, self).__init__(name=name)
        self.channel_axis = 1 if data_format != 'channels_last' else -1

        half = channels // 2
        # With a residual add, the final activation is deferred until
        # after the addition (see residual_activ below).
        last_activ = residual is False
        self.branch = tf.keras.Sequential([
            Conv1x1_Block(half,
                          half,
                          activ=True,
                          data_format=data_format,
                          name=name + '/conv1x1_0'),
            DWConv3x3_Block(half,
                            strides=1,
                            activ=False,
                            data_format=data_format,
                            name=name + '/dwconv3x3'),
            Conv1x1_Block(half,
                          half,
                          activ=last_activ,
                          data_format=data_format,
                          name=name + '/conv1x1_1')
        ])

        if se_reduction:
            self.se = SEBlock(half,
                              se_reduction,
                              data_format,
                              name=name + '/se')
        else:
            self.se = None
        if residual:
            self.residual_activ = ReLU(name=name + '/residual_activ')
        else:
            self.residual_activ = None

        self.concat = Concatenate(axis=self.channel_axis,
                                  name=name + '/concat')
        self.shuffle = ChannelShuffle(data_format,
                                      n_groups,
                                      name=name + '/channel_shuffle')
# Example 6
 def __init__(self,
              out_channels,
              use_bn,
              activation):
     """Two 3x3 conv blocks followed by squeeze-and-excitation.

     The channel count is preserved throughout the block.
     """
     super(IbpPreBlock, self).__init__()
     bias = not use_bn
     self.conv1 = conv3x3_block(in_channels=out_channels,
                                out_channels=out_channels,
                                bias=bias,
                                use_bn=use_bn,
                                activation=activation)
     self.conv2 = conv3x3_block(in_channels=out_channels,
                                out_channels=out_channels,
                                bias=bias,
                                use_bn=use_bn,
                                activation=activation)
     self.se = SEBlock(channels=out_channels,
                       use_conv=False,
                       mid_activation=activation)
    def __init__(self, in_channels, out_channels, stride, cardinality,
                 bottleneck_width, identity_conv3x3):
        """SENet unit: bottleneck body + SE, with optional identity projection."""
        super(SENetUnit, self).__init__()
        # The identity branch needs a projection whenever shape changes.
        self.resize_identity = (stride != 1) or (in_channels != out_channels)

        self.body = SENetBottleneck(in_channels=in_channels,
                                    out_channels=out_channels,
                                    stride=stride,
                                    cardinality=cardinality,
                                    bottleneck_width=bottleneck_width)
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            # Pick the projection kernel size via dispatch instead of branching.
            conv_block = conv3x3_block if identity_conv3x3 else conv1x1_block
            self.identity_conv = conv_block(in_channels=in_channels,
                                            out_channels=out_channels,
                                            stride=stride,
                                            activate=False)
        self.activ = nn.ReLU(inplace=True)
 def __init__(self,
              in_channels,
              channels,
              strides=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              se_reduction=None,
              data_format='channels_last',
              name='residual_block'):
     """Basic residual block: two 3x3 convs with BN and optional SE."""
     super(ResBlock, self).__init__()
     # The basic block supports no grouped / wide / dilated variants.
     assert (groups == 1) and (base_width == 64) and (dilation == 1)
     bn_axis = 1 if data_format != 'channels_last' else -1
     self.conv1 = conv3x3(in_channels,
                          channels,
                          strides,
                          data_format=data_format,
                          name=name + '/conv1')
     self.bn1 = BatchNormalization(axis=bn_axis, name=name + '/bn1')
     self.conv2 = conv3x3(channels,
                          channels,
                          data_format=data_format,
                          name=name + '/conv2')
     self.bn2 = BatchNormalization(axis=bn_axis, name=name + '/bn2')
     self.downsample = downsample
     self.activation = ReLU()
     # NOTE(review): `self.expansion` is a class attribute declared outside
     # this view; for a basic block it is presumably 1 — confirm.
     if se_reduction:
         self.se = SEBlock(channels=channels * self.expansion,
                           reduction=se_reduction,
                           data_format=data_format,
                           name=name + '/se')
     else:
         self.se = None