    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 kernel_size=3,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 init_cfg=None,
                 **kwargs):
        super(CorrelationHead, self).__init__(init_cfg)
        self.kernel_convs = ConvModule(in_channels=in_channels,
                                       out_channels=mid_channels,
                                       kernel_size=kernel_size,
                                       norm_cfg=norm_cfg,
                                       act_cfg=act_cfg)

        self.search_convs = ConvModule(in_channels=in_channels,
                                       out_channels=mid_channels,
                                       kernel_size=kernel_size,
                                       norm_cfg=norm_cfg,
                                       act_cfg=act_cfg)

        self.head_convs = nn.Sequential(
            ConvModule(in_channels=mid_channels,
                       out_channels=mid_channels,
                       kernel_size=1,
                       norm_cfg=norm_cfg,
                       act_cfg=act_cfg),
            ConvModule(in_channels=mid_channels,
                       out_channels=out_channels,
                       kernel_size=1,
                       act_cfg=None))
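
For context, a plausible forward pass for this head (a sketch, not verified
against the source; depthwise_correlation is assumed to be a helper from the
surrounding codebase):

    def forward(self, kernel, search):
        # Embed the template (kernel) and search-region features separately.
        kernel = self.kernel_convs(kernel)
        search = self.search_convs(search)
        # Depth-wise cross correlation between the two embeddings.
        correlation_maps = depthwise_correlation(search, kernel)
        # 1x1 head convs map the correlation response to out_channels.
        return self.head_convs(correlation_maps)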
def test_revert_mmsyncbn():
    if 'SLURM_NTASKS' not in os.environ or int(os.environ['SLURM_NTASKS']) < 2:
        print('Must run on slurm with more than 1 process!\n'
              'srun -p test --gres=gpu:2 -n2')
        return
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NTASKS'])
    local_rank = int(os.environ['SLURM_LOCALID'])
    node_list = str(os.environ['SLURM_NODELIST'])

    node_parts = re.findall('[0-9]+', node_list)
    os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' +
                                 f'.{node_parts[3]}.{node_parts[4]}')
    os.environ['MASTER_PORT'] = '12341'
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)

    dist.init_process_group('nccl')
    torch.cuda.set_device(local_rank)
    x = torch.randn(1, 3, 10, 10).cuda()
    dist.broadcast(x, src=0)
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='MMSyncBN')).cuda()
    conv.eval()
    y_mmsyncbn = conv(x).detach().cpu().numpy()
    conv = revert_sync_batchnorm(conv)
    y_bn = conv(x).detach().cpu().numpy()
    assert np.all(np.isclose(y_bn, y_mmsyncbn, 1e-3))
    conv, x = conv.to('cpu'), x.to('cpu')
    y_bn_cpu = conv(x).detach().numpy()
    assert np.all(np.isclose(y_bn, y_bn_cpu, 1e-3))
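
For reference, a minimal sketch of the conversion on its own (assuming
mmcv's public revert_sync_batchnorm, which swaps SyncBN/MMSyncBN layers for
BatchNorm2d while copying weights and running statistics):

import torch
from mmcv.cnn import ConvModule
from mmcv.cnn.utils import revert_sync_batchnorm

m = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
m = revert_sync_batchnorm(m)       # SyncBN -> BN, parameters preserved
y = m(torch.randn(1, 3, 10, 10))   # now runs on CPU without a process group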
Example #3
    def __init__(self,
                 in_channels,
                 out_channels=2048,
                 norm_cfg=dict(type='BN', momentum=0.1),
                 init_cfg=dict(type='Normal', layer='Linear', std=0.01)):
        super(HRFuseScales, self).__init__(init_cfg=init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.norm_cfg = norm_cfg

        block_type = Bottleneck
        out_channels = [128, 256, 512, 1024]

        # Increase the channels on each resolution
        # from C, 2C, 4C, 8C to 128, 256, 512, 1024
        increase_layers = []
        for i in range(len(in_channels)):
            increase_layers.append(
                ResLayer(
                    block_type,
                    in_channels=in_channels[i],
                    out_channels=out_channels[i],
                    num_blocks=1,
                    stride=1,
                ))
        self.increase_layers = nn.ModuleList(increase_layers)

        # Downsample feature maps in each scale.
        downsample_layers = []
        for i in range(len(in_channels) - 1):
            downsample_layers.append(
                ConvModule(
                    in_channels=out_channels[i],
                    out_channels=out_channels[i + 1],
                    kernel_size=3,
                    stride=2,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    bias=False,
                ))
        self.downsample_layers = nn.ModuleList(downsample_layers)

        # The final conv block before final classifier linear layer.
        self.final_layer = ConvModule(
            in_channels=out_channels[3],
            out_channels=self.out_channels,
            kernel_size=1,
            norm_cfg=self.norm_cfg,
            bias=False,
        )
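
A forward pass consistent with the comments above would increase each
branch's channels, then repeatedly downsample and add the next branch, and
finally apply the 1x1 output conv. A sketch (an assumption, not verified
against the source):

    def forward(self, x):
        # x is a tuple of feature maps, finest resolution first.
        assert isinstance(x, tuple) and len(x) == len(self.in_channels)
        feat = self.increase_layers[0](x[0])
        for i in range(len(self.downsample_layers)):
            # Halve the resolution, then fuse with the next (coarser) branch.
            feat = self.downsample_layers[i](feat) + \
                self.increase_layers[i + 1](x[i + 1])
        return (self.final_layer(feat), )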
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 kernel_size=3,
                 stride=1,
                 se_cfg=None,
                 with_residual=True,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 drop_path_rate=0.,
                 with_cp=False,
                 init_cfg=None,
                 **kwargs):
        super(EdgeResidual, self).__init__(init_cfg=init_cfg)
        assert stride in [1, 2]
        self.with_cp = with_cp
        self.drop_path = DropPath(
            drop_path_rate) if drop_path_rate > 0 else nn.Identity()
        self.with_se = se_cfg is not None
        self.with_residual = (stride == 1 and in_channels == out_channels
                              and with_residual)

        if self.with_se:
            assert isinstance(se_cfg, dict)

        self.conv1 = ConvModule(in_channels=in_channels,
                                out_channels=mid_channels,
                                kernel_size=kernel_size,
                                stride=1,
                                padding=kernel_size // 2,
                                conv_cfg=conv_cfg,
                                norm_cfg=norm_cfg,
                                act_cfg=act_cfg)

        if self.with_se:
            self.se = SELayer(**se_cfg)

        self.conv2 = ConvModule(in_channels=mid_channels,
                                out_channels=out_channels,
                                kernel_size=1,
                                stride=stride,
                                padding=0,
                                conv_cfg=conv_cfg,
                                norm_cfg=norm_cfg,
                                act_cfg=None)
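
The flags set above imply a forward pass along these lines (a sketch; cp is
assumed to be torch.utils.checkpoint):

    def forward(self, x):

        def _inner_forward(x):
            out = self.conv1(x)
            if self.with_se:
                out = self.se(out)
            out = self.conv2(out)
            if self.with_residual:
                # Identity shortcut with stochastic depth on the branch.
                return x + self.drop_path(out)
            return out

        if self.with_cp and x.requires_grad:
            # Trade compute for memory via gradient checkpointing.
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)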
def test_revert_syncbn():
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
    x = torch.randn(1, 3, 10, 10)
    # Expect a ValueError indicating that SyncBN is not supported on CPU
    with pytest.raises(ValueError):
        y = conv(x)
    conv = revert_sync_batchnorm(conv)
    y = conv(x)
    assert y.shape == (1, 8, 9, 9)
def test_bias():
    # bias: auto, without norm
    conv = ConvModule(3, 8, 2)
    assert conv.conv.bias is not None

    # bias: auto, with norm
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.conv.bias is None

    # bias: False, without norm
    conv = ConvModule(3, 8, 2, bias=False)
    assert conv.conv.bias is None

    # bias: True, with norm
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='BN'))
    assert len(record) == 1
    assert record[0].message.args[
        0] == 'ConvModule has norm and bias at the same time'
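
The behaviour exercised above follows from how ConvModule resolves the
default bias='auto' (paraphrasing mmcv's implementation, an assumption about
the exact wording):

# if bias == 'auto':
#     bias = not self.with_norm   # add a conv bias only when no norm follows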
Example #7
def conv_module(in_planes, out_planes, kernel_size=3, padding=1):
    # The module's pipeline: Conv -> BN -> ReLU.
    return ConvModule(
        in_channels=in_planes,
        out_channels=out_planes,
        kernel_size=kernel_size,
        padding=padding,
        bias=True,
        norm_cfg=dict(type='BN', requires_grad=True),
        act_cfg=dict(type='ReLU'),
        inplace=True)
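
A hypothetical usage of the helper above (assuming torch and mmcv's
ConvModule are in scope):

import torch

block = conv_module(16, 32)            # Conv(3x3, pad 1) -> BN -> ReLU
y = block(torch.randn(2, 16, 32, 32))  # padding=1 keeps the spatial size
assert y.shape == (2, 32, 32, 32)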
Example #8
def test_bias():
    # bias: auto, without norm
    conv = ConvModule(3, 8, 2)
    assert conv.conv.bias is not None

    # bias: auto, with norm
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.conv.bias is None

    # bias: False, without norm
    conv = ConvModule(3, 8, 2, bias=False)
    assert conv.conv.bias is None

    # bias: True, with batch norm
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='BN'))
    assert len(record) == 1
    assert record[0].message.args[
        0] == 'Unnecessary conv bias before batch/instance norm'

    # bias: True, with instance norm
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='IN'))
    assert len(record) == 1
    assert record[0].message.args[
        0] == 'Unnecessary conv bias before batch/instance norm'

    # bias: True, with other norm
    with pytest.warns(UserWarning) as record:
        norm_cfg = dict(type='GN', num_groups=1)
        ConvModule(3, 8, 2, bias=True, norm_cfg=norm_cfg)
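        # pytest.warns requires at least one warning to be raised, so emit a
        # sentinel here to show that ConvModule itself warned nothing for GN.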
        warnings.warn('No warnings')
    assert len(record) == 1
    assert record[0].message.args[0] == 'No warnings'
Example #9
def test_order():
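    # Note: this test evidently runs with the conv/norm/act layers patched
    # (mocked) so that a string input accumulates suffixes such as '_conv',
    # '_bn' and '_relu'; the patching setup is not shown in this excerpt.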

    with pytest.raises(AssertionError):
        # order must be a tuple
        order = ['conv', 'norm', 'act']
        ConvModule(3, 8, 2, order=order)

    with pytest.raises(AssertionError):
        # length of order must be 3
        order = ('conv', 'norm')
        ConvModule(3, 8, 2, order=order)

    with pytest.raises(AssertionError):
        # order must be an order of 'conv', 'norm', 'act'
        order = ('conv', 'norm', 'norm')
        ConvModule(3, 8, 2, order=order)

    with pytest.raises(AssertionError):
        # order must be an order of 'conv', 'norm', 'act'
        order = ('conv', 'norm', 'something')
        ConvModule(3, 8, 2, order=order)

    # ('conv', 'norm', 'act')
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input')
    assert out == 'input_conv_bn_relu'

    # ('norm', 'conv', 'act')
    conv = ConvModule(3,
                      8,
                      2,
                      norm_cfg=dict(type='BN'),
                      order=('norm', 'conv', 'act'))
    out = conv('input')
    assert out == 'input_bn_conv_relu'

    # ('conv', 'norm', 'act'), activate=False
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', activate=False)
    assert out == 'input_conv_bn'

    # ('conv', 'norm', 'act'), norm=False
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', norm=False)
    assert out == 'input_conv_relu'
Example #10
    def __init__(self,
                 fp_channels=((768, 256, 256), (384, 256, 256),
                              (320, 256, 128), (128 + 6, 128, 128, 128)),
                 fp_norm_cfg=dict(type='BN2d'),
                 **kwargs):
        super(PAConvHead, self).__init__(fp_channels, fp_norm_cfg, **kwargs)

        # https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/pointnet2/pointnet2_paconv_seg.py#L53
        # PointNet++'s decoder conv has a bias while PAConv's does not,
        # so we rebuild it here.
        self.pre_seg_conv = ConvModule(fp_channels[-1][-1],
                                       self.channels,
                                       kernel_size=1,
                                       bias=False,
                                       conv_cfg=self.conv_cfg,
                                       norm_cfg=self.norm_cfg,
                                       act_cfg=self.act_cfg)
Example #11
    def __init__(self,
                 fp_channels=((768, 256, 256), (384, 256, 256),
                              (320, 256, 128), (128, 128, 128, 128)),
                 **kwargs):
        super(PointNet2Head, self).__init__(**kwargs)

        self.num_fp = len(fp_channels)
        self.FP_modules = nn.ModuleList()
        for cur_fp_mlps in fp_channels:
            self.FP_modules.append(PointFPModule(mlp_channels=cur_fp_mlps))

        # https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_sem_seg.py#L40
        self.pre_seg_conv = ConvModule(fp_channels[-1][-1],
                                       self.channels,
                                       kernel_size=1,
                                       bias=True,
                                       conv_cfg=self.conv_cfg,
                                       norm_cfg=self.norm_cfg,
                                       act_cfg=self.act_cfg)
Example #12
def test_revert_sync_batchnorm():
    conv_syncbn = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN')).to('cpu')
    conv_syncbn.train()
    x = torch.randn(1, 3, 10, 10)
    # Will raise a ValueError saying SyncBN does not run on CPU
    with pytest.raises(ValueError):
        y = conv_syncbn(x)
    conv_bn = revert_sync_batchnorm(conv_syncbn)
    y = conv_bn(x)
    assert y.shape == (1, 8, 9, 9)
    assert conv_bn.training == conv_syncbn.training
    conv_syncbn.eval()
    conv_bn = revert_sync_batchnorm(conv_syncbn)
    assert conv_bn.training == conv_syncbn.training
Example #13
    def __init__(self,
                 num_convs=1,
                 channels=256,
                 kernel_size=3,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU')):
        super(EmbedAggregator, self).__init__()
        assert num_convs > 0, 'The number of convs must be at least 1.'
        self.embed_convs = nn.ModuleList()
        for i in range(num_convs):
            if i == num_convs - 1:
                new_norm_cfg = None
                new_act_cfg = None
            else:
                new_norm_cfg = norm_cfg
                new_act_cfg = act_cfg
            self.embed_convs.append(
                ConvModule(in_channels=channels,
                           out_channels=channels,
                           kernel_size=kernel_size,
                           padding=(kernel_size - 1) // 2,
                           norm_cfg=new_norm_cfg,
                           act_cfg=new_act_cfg))
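
One plausible forward for this aggregator, in the FGFA style of
cosine-similarity weighting (a sketch under that assumption, not this
repository's verified code):

    def forward(self, x, ref_x):
        # Embed the key frame (x) and the reference frames (ref_x).
        x_embed, ref_x_embed = x, ref_x
        for embed_conv in self.embed_convs:
            x_embed = embed_conv(x_embed)
            ref_x_embed = embed_conv(ref_x_embed)
        # Cosine similarity -> per-location aggregation weights.
        x_embed = x_embed / x_embed.norm(p=2, dim=1, keepdim=True)
        ref_x_embed = ref_x_embed / ref_x_embed.norm(p=2, dim=1, keepdim=True)
        ada_weights = (x_embed * ref_x_embed).sum(dim=1, keepdim=True)
        ada_weights = ada_weights.softmax(dim=0)
        # Weighted sum over the reference frames.
        return (ref_x * ada_weights).sum(dim=0, keepdim=True)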
Example #14
def test_conv_module():
    with pytest.raises(AssertionError):
        # conv_cfg must be a dict or None
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)

    with pytest.raises(AssertionError):
        # norm_cfg must be a dict or None
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)

    with pytest.raises(KeyError):
        # softmax is not supported
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)

    # conv + norm + act
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv + act
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert not conv.with_norm
    assert not hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert not conv.with_norm
    assert not hasattr(conv, 'norm')
    assert not conv.with_activation
    assert not hasattr(conv, 'activate')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # with_spectral_norm=True
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # padding_mode='reflect'
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # non-existing padding mode
    with pytest.raises(KeyError):
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')

    # leaky relu
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)
Example #15
    def __init__(self,
                 arch='b0',
                 drop_path_rate=0.,
                 out_indices=(6, ),
                 frozen_stages=0,
                 conv_cfg=dict(type='Conv2dAdaptivePadding'),
                 norm_cfg=dict(type='BN', eps=1e-3),
                 act_cfg=dict(type='Swish'),
                 norm_eval=False,
                 with_cp=False,
                 init_cfg=[
                     dict(type='Kaiming', layer='Conv2d'),
                     dict(type='Constant',
                          layer=['_BatchNorm', 'GroupNorm'],
                          val=1)
                 ]):
        super(EfficientNet, self).__init__(init_cfg)
        assert arch in self.arch_settings, \
            f'"{arch}" is not one of the arch_settings ' \
            f'({", ".join(self.arch_settings.keys())})'
        self.arch_setting = self.arch_settings[arch]
        self.layer_setting = self.layer_settings[arch[:1]]
        for index in out_indices:
            if index not in range(0, len(self.layer_setting)):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, {len(self.layer_setting)}). '
                                 f'But received {index}')

        if frozen_stages not in range(len(self.layer_setting) + 1):
            raise ValueError('frozen_stages must be in range(0, '
                             f'{len(self.layer_setting) + 1}). '
                             f'But received {frozen_stages}')
        self.drop_path_rate = drop_path_rate
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        self.layer_setting = model_scaling(self.layer_setting,
                                           self.arch_setting)
        block_cfg_0 = self.layer_setting[0][0]
        block_cfg_last = self.layer_setting[-1][0]
        self.in_channels = make_divisible(block_cfg_0[1], 8)
        self.out_channels = block_cfg_last[1]
        self.layers = nn.ModuleList()
        self.layers.append(
            ConvModule(in_channels=3,
                       out_channels=self.in_channels,
                       kernel_size=block_cfg_0[0],
                       stride=block_cfg_0[3],
                       padding=block_cfg_0[0] // 2,
                       conv_cfg=self.conv_cfg,
                       norm_cfg=self.norm_cfg,
                       act_cfg=self.act_cfg))
        self.make_layer()
        # Avoid building unused layers in mmdetection.
        if len(self.layers) < max(self.out_indices) + 1:
            self.layers.append(
                ConvModule(in_channels=self.in_channels,
                           out_channels=self.out_channels,
                           kernel_size=block_cfg_last[0],
                           stride=block_cfg_last[3],
                           padding=block_cfg_last[0] // 2,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg,
                           act_cfg=self.act_cfg))
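
For reference, make_divisible typically rounds a channel count to the
nearest multiple of the divisor without shrinking it by more than roughly
10%; a common implementation (an assumption, not necessarily this
repository's exact helper):

def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Guard against rounding down by more than (1 - min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value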
Example #16
    def __init__(self,
                 img_scale_factor,
                 out_indices=[2, 3, 4, 5, 6],
                 flow_scale_factor=5.0,
                 flow_img_norm_std=[255.0, 255.0, 255.0],
                 flow_img_norm_mean=[0.411, 0.432, 0.450]):
        super(FlowNetSimple, self).__init__()
        self.img_scale_factor = img_scale_factor
        self.out_indices = out_indices
        self.flow_scale_factor = flow_scale_factor
        self.flow_img_norm_mean = flow_img_norm_mean
        self.flow_img_norm_std = flow_img_norm_std

        self.conv_layers = []
        conv_layers_setting = self.arch_setting['conv_layers']
        for i in range(len(conv_layers_setting['inplanes'])):
            num_convs = conv_layers_setting['num_convs'][i]
            kernel_size = conv_layers_setting['kernel_size'][i]
            inplanes = conv_layers_setting['inplanes'][i]
            if i == len(conv_layers_setting['inplanes']) - 1:
                planes = 2 * inplanes
            else:
                planes = conv_layers_setting['inplanes'][i + 1]

            conv_layer = nn.ModuleList()
            conv_layer.append(
                ConvModule(in_channels=inplanes,
                           out_channels=planes,
                           kernel_size=kernel_size,
                           stride=2,
                           padding=(kernel_size - 1) // 2,
                           bias=True,
                           conv_cfg=dict(type='Conv'),
                           act_cfg=dict(type='LeakyReLU', negative_slope=0.1)))
            for j in range(1, num_convs):
                kernel_size = 3 if i == 2 else kernel_size
                conv_layer.append(
                    ConvModule(in_channels=planes,
                               out_channels=planes,
                               kernel_size=kernel_size,
                               stride=1,
                               padding=(kernel_size - 1) // 2,
                               bias=True,
                               conv_cfg=dict(type='Conv'),
                               act_cfg=dict(type='LeakyReLU',
                                            negative_slope=0.1)))

            self.add_module(f'conv{i+1}', conv_layer)
            self.conv_layers.append(f'conv{i+1}')

        self.deconv_layers = []
        self.flow_layers = []
        self.upflow_layers = []
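        # The decoder names below (deconv{i+2}, predict_flow{i+3},
        # upsample_flow{i+2}) appear to mirror FlowNetS's pyramid-level
        # numbering rather than the loop index.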
        deconv_layers_setting = self.arch_setting['deconv_layers']
        planes = deconv_layers_setting['inplanes'][-1] // 2
        for i in range(len(deconv_layers_setting['inplanes']) - 1, -1, -1):
            inplanes = deconv_layers_setting['inplanes'][i]

            deconv_layer = ConvModule(in_channels=inplanes,
                                      out_channels=planes,
                                      kernel_size=4,
                                      stride=2,
                                      padding=1,
                                      bias=False,
                                      conv_cfg=dict(type='deconv'),
                                      act_cfg=dict(type='LeakyReLU',
                                                   negative_slope=0.1))
            self.add_module(f'deconv{i+2}', deconv_layer)
            self.deconv_layers.insert(0, f'deconv{i+2}')

            flow_layer = ConvModule(in_channels=inplanes,
                                    out_channels=2,
                                    kernel_size=3,
                                    stride=1,
                                    padding=1,
                                    bias=False,
                                    conv_cfg=dict(type='Conv'),
                                    act_cfg=None)
            self.add_module(f'predict_flow{i+3}', flow_layer)
            self.flow_layers.insert(0, f'predict_flow{i+3}')

            upflow_layer = ConvModule(in_channels=2,
                                      out_channels=2,
                                      kernel_size=4,
                                      stride=2,
                                      padding=1,
                                      bias=False,
                                      conv_cfg=dict(type='deconv'),
                                      act_cfg=None)
            self.add_module(f'upsample_flow{i+2}', upflow_layer)
            self.upflow_layers.insert(0, f'upsample_flow{i+2}')
            planes = planes // 2

        self.predict_flow = ConvModule(in_channels=planes * (2 + 4) + 2,
                                       out_channels=2,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1,
                                       bias=False,
                                       conv_cfg=dict(type='Conv'),
                                       act_cfg=None)
Example #17
def test_conv_module():
    with pytest.raises(AssertionError):
        # conv_cfg must be a dict or None
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)

    with pytest.raises(AssertionError):
        # norm_cfg must be a dict or None
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)

    with pytest.raises(KeyError):
        # softmax is not supported
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)

    # conv + norm + act
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv + act
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert not conv.with_norm
    assert conv.norm is None
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert not conv.with_norm
    assert conv.norm is None
    assert not conv.with_activation
    assert not hasattr(conv, 'activate')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv with its own `init_weights` method
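    # (`ExampleConv` is assumed to be a dummy conv layer registered elsewhere
    # in the test suite, whose init_weights zero-initialises `conv0`.)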
    conv_module = ConvModule(3,
                             8,
                             2,
                             conv_cfg=dict(type='ExampleConv'),
                             act_cfg=None)
    assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))

    # with_spectral_norm=True
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # padding_mode='reflect'
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # non-existing padding mode
    with pytest.raises(KeyError):
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')

    # leaky relu
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # tanh
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
    assert isinstance(conv.activate, nn.Tanh)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # Sigmoid
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
    assert isinstance(conv.activate, nn.Sigmoid)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # PReLU
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
    assert isinstance(conv.activate, nn.PReLU)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # HSwish
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
    assert isinstance(conv.activate, HSwish)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # HSigmoid
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
    assert isinstance(conv.activate, HSigmoid)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)