Example #1
    def test_scale_layer1(self):
        dict1 = Scale(name='scale',
                      src_layers=[Pooling(name='pool')]).to_model_params()
        self.assertTrue(self.sample_syntax['scale1'] == dict1)
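For context, a standalone sketch of what this test exercises (a sketch only: it assumes Scale and Pooling are importable from dlpy.layers, and sample_syntax is a test fixture, so the result is simply printed here):

    from dlpy.layers import Pooling, Scale

    # Build a Scale layer fed by a pooling layer and inspect its parameter dict,
    # the same dict the test compares against self.sample_syntax['scale1']
    scale = Scale(name='scale', src_layers=[Pooling(name='pool')])
    print(scale.to_model_params())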
Example #2
    def test_scale_layer2(self):
        if not __dev__:
            with self.assertRaises(DLPyError):
                Scale(not_a_parameter=1)
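The same guard can be triggered outside the test harness; a minimal sketch, assuming DLPyError is importable from dlpy.utils:

    from dlpy.utils import DLPyError
    from dlpy.layers import Scale

    try:
        Scale(not_a_parameter=1)  # unknown keyword argument
    except DLPyError as err:
        print(err)  # reports the unrecognized parameter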
Example #3
    def _MBConvBlock(inputs, in_channels, out_channels, ksize, stride, expansion, se_ratio, stage_id, block_id,
                     noskip=False, activation_fn='relu'):
        '''
        Inverted Residual Block

        Parameters
        ----------
        inputs: input tensor
            Specifies the input tensor for the block.
        in_channels: integer
            Specifies the number of channels in the input tensor.
        out_channels: integer
            Specifies the number of channels in the output tensor.
        ksize: integer
            Specifies the kernel size of the depthwise convolution.
        stride: integer
            Specifies the stride of the depthwise convolution.
        expansion: double
            Specifies the expansion factor for the input layer.
        se_ratio: double
            Specifies the ratio to squeeze the input filters for squeeze-and-excitation block.
        stage_id: integer
            Specifies the stage id used for naming the block's layers.
        block_id: integer
            Specifies the block id used for naming the block's layers.
        noskip: bool
            Specifies whether to disable the skip connection. By default, the skip connection is used.
        activation_fn: string
            Specifies the activation function.
        '''

        # The MobileNetV2 block, also known as an inverted residual block, consists of three convolutions:
        # the first is a 1x1 convolution for expansion,
        # the second is a depthwise convolution,
        # the third is a 1x1 convolution without any non-linearity, used for projection.
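        # Channel bookkeeping through the block (illustrative numbers, not from the
        # source): with in_channels=16, expansion=6, out_channels=16 and stride=1,
        # the tensor goes 16 -> 96 (expand) -> 96 (depthwise) -> 16 (project),
        # and the residual connection below applies because the shapes match.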

        x = inputs
        prefix = 'stage_{}_block_{}'.format(stage_id, block_id)
        n_groups = in_channels  # when expansion == 1, the 1x1 expansion is skipped and n_groups stays at in_channels

        if expansion > 1:
            # For MobileNet V2, expansion>1 when stage>0
            n_groups = int(expansion * in_channels)  # update n_groups
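            # e.g., expansion=6, in_channels=24 -> n_groups=144 (illustrative numbers)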
            x = Conv2d(n_groups, 1, include_bias=False, act='identity',
                       name=prefix + 'expand')(x)
            x = BN(name=prefix + 'expand_BN', act='identity')(x)

        # Depthwise convolution
        x = GroupConv2d(n_groups, n_groups, ksize, stride=stride, act='identity',
                        include_bias=False, name=prefix + 'depthwise')(x)
        x = BN(name=prefix + 'depthwise_BN', act=activation_fn)(x)

        # Squeeze-Excitation
        if 0 < se_ratio <= 1:
            se_input = x  # features to be squeezed
            x = GlobalAveragePooling2D(name=prefix + "global_avg_pool")(x)
            # Squeeze
            channels_se = max(1, int(in_channels * se_ratio))
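            # e.g., in_channels=96, se_ratio=0.25 -> channels_se=24 (illustrative);
            # the squeeze width comes from in_channels, not the expanded n_groups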
            x = Conv2d(channels_se, 1, include_bias=True, act=activation_fn, name=prefix + 'squeeze')(x)
            x = Conv2d(n_groups, 1, include_bias=True, act='sigmoid', name=prefix + 'excitation')(x)
            x = Reshape(name=prefix + 'reshape', width=n_groups, height=1, depth=1)(x)
            x = Scale(name=prefix + 'scale')([se_input, x])  # rescale se_input channel-wise by the learned weights

        # Project
        x = Conv2d(out_channels, 1, include_bias=False, act='identity', name=prefix + 'project')(x)
        x = BN(name=prefix + 'project_BN', act='identity')(x)  # linear (identity) activation on the narrow projection tensor
        # Prepare output for MBConv block
        if in_channels == out_channels and stride == 1 and (not noskip):
            # dropout can be added.
            return Res(name=prefix + 'add_se_residual')([x, inputs])
        else:
            return x
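A minimal sketch of calling the block (the Input layer, its parameter names, and the shape values are assumptions for illustration; _MBConvBlock itself is a module-private helper):

    from dlpy.layers import Input

    inp = Input(n_channels=32, width=112, height=112, name='data')  # assumed input spec
    x = _MBConvBlock(inp, in_channels=32, out_channels=16, ksize=3, stride=1,
                     expansion=1, se_ratio=0.25, stage_id=0, block_id=0)

With expansion=1 the 1x1 expansion convolution is skipped, and because in_channels differs from out_channels the block returns the projected tensor without a residual connection.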