Code example #1
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant
from paddle.regularizer import L2Decay
from paddle.vision.ops import DeformConv2D


class DeformableConvV2(nn.Layer):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 lr_scale=1,
                 regularizer=None,
                 name=None):
        super(DeformableConvV2, self).__init__()
        self.offset_channel = 2 * kernel_size**2
        self.mask_channel = kernel_size**2

        if lr_scale == 1 and regularizer is None:
            offset_bias_attr = ParamAttr(
                initializer=Constant(0.),
                name='{}._conv_offset.bias'.format(name))
        else:
            offset_bias_attr = ParamAttr(
                initializer=Constant(0.),
                learning_rate=lr_scale,
                regularizer=regularizer,
                name='{}._conv_offset.bias'.format(name))
        self.conv_offset = nn.Conv2D(
            in_channels,
            3 * kernel_size**2,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            weight_attr=ParamAttr(
                initializer=Constant(0.0),
                name='{}._conv_offset.weight'.format(name)),
            bias_attr=offset_bias_attr)

        if bias_attr:
            # the FCOS-DCN head specifically needs a custom learning_rate and regularizer on the bias
            dcn_bias_attr = ParamAttr(
                name=name + "_bias",
                initializer=Constant(value=0),
                regularizer=L2Decay(0.),
                learning_rate=2.)
        else:
            # the ResNet backbone does not need a conv bias
            dcn_bias_attr = False
        self.conv_dcn = DeformConv2D(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2 * dilation,
            dilation=dilation,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=dcn_bias_attr)
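
The snippet quotes only the constructor. A minimal forward sketch, assuming the usual DCNv2 wiring implied by offset_channel and mask_channel (this method is an illustration, not part of the quoted code):

    def forward(self, x):
        # conv_offset predicts 3*k*k channels: 2*k*k (x, y) offsets, then k*k mask logits
        offset_mask = self.conv_offset(x)
        offset, mask = paddle.split(
            offset_mask,
            num_or_sections=[self.offset_channel, self.mask_channel],
            axis=1)
        mask = F.sigmoid(mask)  # modulation mask in (0, 1)
        return self.conv_dcn(x, offset, mask=mask)

Because conv_offset starts with zero weights and biases, the layer initially samples on the regular grid (zero offsets, uniform 0.5 mask), so training starts from a well-behaved, nearly-plain convolution.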
Code example #2
File: resnet.py  Project: xiegegege/PaddleDetection
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant
from paddle.regularizer import L2Decay
from paddle.vision.ops import DeformConv2D


class ConvNormLayer(nn.Layer):
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size,
                 stride,
                 groups=1,
                 act=None,
                 norm_type='bn',
                 norm_decay=0.,
                 freeze_norm=True,
                 lr=1.0,
                 dcn_v2=False):
        super(ConvNormLayer, self).__init__()
        assert norm_type in ['bn', 'sync_bn']
        self.norm_type = norm_type
        self.act = act
        self.dcn_v2 = dcn_v2

        if not self.dcn_v2:
            self.conv = nn.Conv2D(in_channels=ch_in,
                                  out_channels=ch_out,
                                  kernel_size=filter_size,
                                  stride=stride,
                                  padding=(filter_size - 1) // 2,
                                  groups=groups,
                                  weight_attr=ParamAttr(learning_rate=lr),
                                  bias_attr=False)
        else:
            self.offset_channel = 2 * filter_size**2
            self.mask_channel = filter_size**2

            self.conv_offset = nn.Conv2D(
                in_channels=ch_in,
                out_channels=3 * filter_size**2,
                kernel_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                weight_attr=ParamAttr(initializer=Constant(0.)),
                bias_attr=ParamAttr(initializer=Constant(0.)))
            self.conv = DeformConv2D(in_channels=ch_in,
                                     out_channels=ch_out,
                                     kernel_size=filter_size,
                                     stride=stride,
                                     padding=(filter_size - 1) // 2,
                                     dilation=1,
                                     groups=groups,
                                     weight_attr=ParamAttr(learning_rate=lr),
                                     bias_attr=False)

        norm_lr = 0. if freeze_norm else lr
        param_attr = ParamAttr(learning_rate=norm_lr,
                               regularizer=L2Decay(norm_decay),
                               trainable=not freeze_norm)
        bias_attr = ParamAttr(learning_rate=norm_lr,
                              regularizer=L2Decay(norm_decay),
                              trainable=not freeze_norm)

        global_stats = freeze_norm
        if norm_type == 'sync_bn':
            self.norm = nn.SyncBatchNorm(ch_out,
                                         weight_attr=param_attr,
                                         bias_attr=bias_attr)
        else:
            self.norm = nn.BatchNorm(ch_out,
                                     act=None,
                                     param_attr=param_attr,
                                     bias_attr=bias_attr,
                                     use_global_stats=global_stats)
        norm_params = self.norm.parameters()

        if freeze_norm:
            for param in norm_params:
                param.stop_gradient = True
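
As in example #1, only __init__ is quoted. A hedged sketch of the matching forward (same split-and-sigmoid pattern, followed by normalization and activation; not part of the quoted code):

    def forward(self, inputs):
        if not self.dcn_v2:
            out = self.conv(inputs)
        else:
            # split the combined prediction into offsets and mask logits
            offset_mask = self.conv_offset(inputs)
            offset, mask = paddle.split(
                offset_mask,
                num_or_sections=[self.offset_channel, self.mask_channel],
                axis=1)
            mask = F.sigmoid(mask)
            out = self.conv(inputs, offset, mask=mask)
        out = self.norm(out)
        if self.act:
            out = getattr(F, self.act)(out)  # e.g. act='relu' -> F.relu
        return out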
Code example #3
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Constant
from paddle.regularizer import L2Decay
from paddle.vision.ops import DeformConv2D

# get_norm and Mish are helpers defined elsewhere in this project.


class Conv2dUnit(nn.Layer):
    def __init__(self,
                 input_dim,
                 filters,
                 filter_size,
                 stride=1,
                 bias_attr=False,
                 norm_type=None,
                 groups=1,
                 norm_groups=32,
                 act=None,
                 freeze_norm=False,
                 is_test=False,
                 norm_decay=0.,
                 lr=1.,
                 bias_lr=None,
                 weight_init=None,
                 bias_init=None,
                 use_dcn=False,
                 name=''):
        super(Conv2dUnit, self).__init__()
        self.filters = filters
        self.filter_size = filter_size
        self.stride = stride
        self.padding = (filter_size - 1) // 2
        self.act = act
        self.freeze_norm = freeze_norm
        self.is_test = is_test
        self.norm_decay = norm_decay
        self.use_dcn = use_dcn
        self.name = name

        # conv
        conv_name = name
        self.conv_offset = None
        if use_dcn:
            conv_battr = False
            if bias_attr:
                blr = lr
                if bias_lr:
                    blr = bias_lr
                conv_battr = ParamAttr(learning_rate=blr,
                                       initializer=bias_init,
                                       regularizer=L2Decay(0.))   # parameters that must not be regularized: the scale/offset of norm layers (e.g. bn, affine_channel, gn) and the bias of conv layers

            self.offset_channel = 2 * filter_size**2
            self.mask_channel = filter_size**2

            self.conv_offset = nn.Conv2D(
                in_channels=input_dim,
                out_channels=3 * filter_size**2,
                kernel_size=filter_size,
                stride=stride,
                padding=self.padding,
                weight_attr=ParamAttr(initializer=Constant(0.)),
                bias_attr=ParamAttr(initializer=Constant(0.)))
            # the official DCNv2 (paddle.vision.ops.DeformConv2D)
            self.conv = DeformConv2D(
                in_channels=input_dim,
                out_channels=filters,
                kernel_size=filter_size,
                stride=stride,
                padding=self.padding,
                dilation=1,
                groups=groups,
                weight_attr=ParamAttr(learning_rate=lr),
                bias_attr=conv_battr)
            # self-implemented DCNv2 (MyDCNv2), kept commented out for reference
            # self.conv = MyDCNv2(
            #     in_channels=input_dim,
            #     out_channels=filters,
            #     kernel_size=filter_size,
            #     stride=stride,
            #     padding=self.padding,
            #     dilation=1,
            #     groups=groups,
            #     weight_attr=ParamAttr(learning_rate=lr),
            #     bias_attr=conv_battr)
        else:
            conv_battr = False
            if bias_attr:
                blr = lr
                if bias_lr:
                    blr = bias_lr
                conv_battr = ParamAttr(learning_rate=blr,
                                       initializer=bias_init,
                                       regularizer=L2Decay(0.))   # same rule as above: no regularization on the conv bias
            self.conv = nn.Conv2D(
                in_channels=input_dim,
                out_channels=filters,
                kernel_size=filter_size,
                stride=stride,
                padding=self.padding,
                groups=groups,
                weight_attr=ParamAttr(learning_rate=lr, initializer=weight_init),
                bias_attr=conv_battr)


        # norm
        assert norm_type in [None, 'bn', 'sync_bn', 'gn', 'affine_channel', 'in', 'ln']
        bn, sync_bn, gn, af = get_norm(norm_type)
        if norm_type == 'in':
            norm_groups = filters
        if norm_type == 'ln':
            norm_groups = 1
        if conv_name == "conv1":
            norm_name = "bn_" + conv_name
            if gn:
                norm_name = "gn_" + conv_name
            if af:
                norm_name = "af_" + conv_name
        else:
            norm_name = "bn" + conv_name[3:]
            if gn:
                norm_name = "gn" + conv_name[3:]
            if af:
                norm_name = "af" + conv_name[3:]
        norm_lr = 0. if freeze_norm else lr
        pattr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay),   # norm-layer scale must not carry regularization (see the note above)
            name=norm_name + "_scale",
            trainable=not freeze_norm)
        battr = ParamAttr(
            learning_rate=norm_lr,
            regularizer=L2Decay(norm_decay),   # same rule for the norm-layer offset
            name=norm_name + "_offset",
            trainable=not freeze_norm)
        self.bn = None
        self.gn = None
        self.af = None
        if bn:
            self.bn = paddle.nn.BatchNorm2D(filters, weight_attr=pattr, bias_attr=battr)
        if sync_bn:
            self.bn = paddle.nn.SyncBatchNorm(filters, weight_attr=pattr, bias_attr=battr)
        if gn:
            self.gn = paddle.nn.GroupNorm(num_groups=norm_groups, num_channels=filters, weight_attr=pattr, bias_attr=battr)
        if af:
            self.af = True
            self.scale = self.create_parameter(
                shape=[filters],
                dtype='float32',
                attr=pattr,
                default_initializer=Constant(1.))
            self.offset = self.create_parameter(
                shape=[filters],
                dtype='float32',
                attr=battr,
                default_initializer=Constant(0.), is_bias=True)

        # act
        self.act = None
        if act == 'relu':
            self.act = paddle.nn.ReLU()
        elif act == 'leaky':
            self.act = paddle.nn.LeakyReLU(0.1)
        elif act == 'mish':
            self.act = Mish()
        elif act is None:
            pass
        else:
            raise NotImplementedError("Activation '{}' is not implemented.".format(act))
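
Only the constructor is quoted here as well. A sketch of how the pieces could be wired in forward (assumed, not quoted; the affine_channel branch is written out by hand):

    def forward(self, x):
        if self.use_dcn:
            offset_mask = self.conv_offset(x)
            offset, mask = paddle.split(
                offset_mask, [self.offset_channel, self.mask_channel], axis=1)
            mask = paddle.nn.functional.sigmoid(mask)
            x = self.conv(x, offset, mask=mask)
        else:
            x = self.conv(x)
        if self.bn:
            x = self.bn(x)
        if self.gn:
            x = self.gn(x)
        if self.af:
            # affine_channel: per-channel scale and shift
            x = x * self.scale.reshape((1, -1, 1, 1)) \
                + self.offset.reshape((1, -1, 1, 1))
        if self.act:
            x = self.act(x)
        return x

Hypothetical usage: Conv2dUnit(256, 256, 3, norm_type='gn', act='relu', use_dcn=True, name='conv2') would build a 3x3 DCNv2 + GroupNorm(32 groups) + ReLU block.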