Example #1
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 is_vd_mode=False,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = AvgPool2D(kernel_size=2,
                                     stride=2,
                                     padding=0,
                                     ceil_mode=True)
        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        bn_name = name + '_bn'
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
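The snippet ends at __init__; for orientation, here is a minimal forward sketch that matches this ResNet-vd style layer (the pairing is assumed, not shown in the example):

    def forward(self, inputs):
        # vd mode downsamples with average pooling before the conv
        # instead of relying on a strided convolution.
        if self.is_vd_mode:
            inputs = self._pool2d_avg(inputs)
        y = self._conv(inputs)
        y = self._batch_norm(y)  # activation is fused into BatchNorm via act
        return y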
Example #2
    def __init__(self,
                 num_channels,
                 filter_size,
                 num_filters,
                 stride,
                 padding,
                 channels=None,
                 num_groups=1,
                 name=None,
                 use_cudnn=True):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=padding,
                            groups=num_groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            param_attr=ParamAttr(name=name + "_bn_scale"),
            bias_attr=ParamAttr(name=name + "_bn_offset"),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")
Example #3
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + ".conv2d.output.1.w_0"),
            bias_attr=ParamAttr(name=name + ".conv2d.output.1.b_0"))
        bn_name = name + "_bn"
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + ".output.1.w_0"),
            bias_attr=ParamAttr(bn_name + ".output.1.b_0"),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = nn.Conv2D(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(kernel_size - 1) // 2,
                               groups=groups,
                               weight_attr=ParamAttr(
                                   initializer=KaimingNormal(),
                                   name=name + "_weights"),
                               bias_attr=False)
        bn_name = name + "_bn"

        self._batch_norm = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale",
                                 regularizer=paddle.regularizer.L2Decay(0.0)),
            bias_attr=ParamAttr(name=bn_name + "_offset",
                                regularizer=paddle.regularizer.L2Decay(0.0)),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance")
Example #5
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None,
                 data_format="NCHW"):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False,
                            data_format=data_format)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(bn_name + "_offset"),
            moving_mean_name=bn_name + "_mean",
            moving_variance_name=bn_name + "_variance",
            data_layout=data_format)
Example #6
 def __init__(self, channel, lr_mult, conv_decay, reduction=4, name=""):
     super(SEModule, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2D(1)
     mid_channels = int(channel // reduction)
     self.conv1 = nn.Conv2D(
         in_channels=channel,
         out_channels=mid_channels,
         kernel_size=1,
         stride=1,
         padding=0,
         weight_attr=ParamAttr(
             learning_rate=lr_mult,
             regularizer=L2Decay(conv_decay),
             name=name + "_1_weights"),
         bias_attr=ParamAttr(
             learning_rate=lr_mult,
             regularizer=L2Decay(conv_decay),
             name=name + "_1_offset"))
     self.conv2 = nn.Conv2D(
         in_channels=mid_channels,
         out_channels=channel,
         kernel_size=1,
         stride=1,
         padding=0,
         weight_attr=ParamAttr(
             learning_rate=lr_mult,
             regularizer=L2Decay(conv_decay),
             name=name + "_2_weights"),
         bias_attr=ParamAttr(
             learning_rate=lr_mult,
             regularizer=L2Decay(conv_decay),
             name=name + "_2_offset"))
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 if_act=True,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.if_act = if_act
        self.act = act
        self.conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(name=name + '_weights'),
            bias_attr=False)

        self.bn = nn.BatchNorm(
            num_channels=out_channels,
            act=act,
            param_attr=ParamAttr(name="bn_" + name + "_scale"),
            bias_attr=ParamAttr(name="bn_" + name + "_offset"),
            moving_mean_name="bn_" + name + "_mean",
            moving_variance_name="bn_" + name + "_variance")
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 padding=1,
                 conv_decay=0,
                 name=None):
        super(SepConvLayer, self).__init__()
        self.dw_conv = nn.Conv2D(in_channels=in_channels,
                                 out_channels=in_channels,
                                 kernel_size=kernel_size,
                                 stride=1,
                                 padding=padding,
                                 groups=in_channels,
                                 weight_attr=ParamAttr(
                                     name=name + "_dw_weights",
                                     regularizer=L2Decay(conv_decay)),
                                 bias_attr=False)

        self.bn = nn.BatchNorm2D(in_channels,
                                 weight_attr=ParamAttr(
                                     name=name + "_bn_scale",
                                     regularizer=L2Decay(0.)),
                                 bias_attr=ParamAttr(name=name + "_bn_offset",
                                                     regularizer=L2Decay(0.)))

        self.pw_conv = nn.Conv2D(in_channels=in_channels,
                                 out_channels=out_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0,
                                 weight_attr=ParamAttr(
                                     name=name + "_pw_weights",
                                     regularizer=L2Decay(conv_decay)),
                                 bias_attr=False)
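A minimal forward sketch for this depthwise-separable block; placing ReLU6 after the depthwise BN is an assumption based on the usual SSDLite pattern:

    def forward(self, x):
        x = self.dw_conv(x)        # depthwise conv, groups == in_channels
        x = F.relu6(self.bn(x))    # BN + ReLU6 (assumed activation)
        return self.pw_conv(x)     # 1x1 pointwise projection, no activation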
Example #9
def _convert_param_attr_to_list(param_attr, n):
    if isinstance(param_attr, (list, tuple)):
        assert len(param_attr) == n, (
            "length of param_attr should be %d when it is a list/tuple" % n)
        param_attrs = []
        for attr in param_attr:
            if isinstance(attr, bool):
                if attr:
                    param_attrs.append(ParamAttr._to_attr(None))
                else:
                    param_attrs.append(False)
            else:
                param_attrs.append(ParamAttr._to_attr(attr))
    elif isinstance(param_attr, bool):
        param_attrs = []
        if param_attr:
            param_attrs = [ParamAttr._to_attr(None) for i in range(n)]
        else:
            param_attrs = [False] * n
    else:
        param_attrs = []
        attr = ParamAttr._to_attr(param_attr)
        for i in range(n):
            attr_i = copy.deepcopy(attr)
            if attr.name:
                attr_i.name = attr_i.name + "_" + str(i)
            param_attrs.append(attr_i)
    return param_attrs
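A quick, hypothetical demonstration of the three branches (the helper needs copy imported and relies on the private ParamAttr._to_attr):

from paddle import ParamAttr

# Single attr: deep-copied n times, with names suffixed _0, _1, ...
attrs = _convert_param_attr_to_list(ParamAttr(name="w"), 2)
print([a.name for a in attrs])                  # ['w_0', 'w_1']

# Bool: True expands to n default attrs, False to [False] * n.
print(_convert_param_attr_to_list(False, 3))    # [False, False, False]

# List/tuple: length must equal n; bools and attrs are normalized per item.
attrs = _convert_param_attr_to_list([True, ParamAttr(name="b")], 2)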
Example #10
    def __init__(self, in_channel, mid_channel, out_channel, fuse):
        super().__init__()
        self.conv1 = nn.Conv2D(
            in_channel,
            mid_channel,
            kernel_size=1,
            bias_attr=False,
            weight_attr=ParamAttr(initializer=KaimingNormal()))
        self.conv1_bn = nn.BatchNorm2D(mid_channel)

        self.conv2 = nn.Conv2D(
            mid_channel,
            out_channel,
            kernel_size=3,
            stride=1,
            padding=1,
            bias_attr=False,
            weight_attr=ParamAttr(initializer=KaimingNormal()))
        self.conv2_bn = nn.BatchNorm2D(out_channel)
        if fuse:
            self.att_conv = nn.Sequential(
                nn.Conv2D(mid_channel * 2, 2, kernel_size=1),
                nn.Sigmoid(),
            )
        else:
            self.att_conv = None
Example #11
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        bn_name = "bn_" + name
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #12
 def __init__(self, in_dim=256, mlp_dim=1024, resolution=7, num_stages=1):
     super(TwoFCHead, self).__init__()
     self.in_dim = in_dim
     self.mlp_dim = mlp_dim
     self.num_stages = num_stages
     fan = in_dim * resolution * resolution
     self.fc6_list = []
     self.fc6_relu_list = []
     self.fc7_list = []
     self.fc7_relu_list = []
     for stage in range(num_stages):
         fc6_name = 'fc6_{}'.format(stage)
         fc7_name = 'fc7_{}'.format(stage)
         fc6 = self.add_sublayer(
             fc6_name,
             nn.Linear(in_dim * resolution * resolution,
                       mlp_dim,
                       weight_attr=ParamAttr(initializer=XavierUniform(
                           fan_out=fan)),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
         fc6_relu = self.add_sublayer(fc6_name + 'act', ReLU())
         fc7 = self.add_sublayer(
             fc7_name,
             nn.Linear(mlp_dim,
                       mlp_dim,
                       weight_attr=ParamAttr(initializer=XavierUniform()),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
         fc7_relu = self.add_sublayer(fc7_name + 'act', ReLU())
         self.fc6_list.append(fc6)
         self.fc6_relu_list.append(fc6_relu)
         self.fc7_list.append(fc7)
         self.fc7_relu_list.append(fc7_relu)
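The matching forward would flatten each RoI feature and run it through the stage's fc6/fc7 pair; this single-stage sketch is an assumption, not part of the example:

    def forward(self, rois_feat, stage=0):
        # Flatten [N, C, H, W] RoI features to [N, C*H*W] for fc6.
        rois_feat = paddle.flatten(rois_feat, start_axis=1)
        fc6 = self.fc6_relu_list[stage](self.fc6_list[stage](rois_feat))
        return self.fc7_relu_list[stage](self.fc7_list[stage](fc6))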
Example #13
 def __init__(self,
              num_channels,
              num_filters,
              filter_size,
              stride=1,
              groups=1,
              is_vd_mode=False,
              act=None,
              lr_mult=1.0,
              data_format="NCHW"):
     super().__init__()
     self.is_vd_mode = is_vd_mode
     self.act = act
     self.avg_pool = AvgPool2D(
         kernel_size=2, stride=2, padding=0, ceil_mode=True)
     self.conv = Conv2D(
         in_channels=num_channels,
         out_channels=num_filters,
         kernel_size=filter_size,
         stride=stride,
         padding=(filter_size - 1) // 2,
         groups=groups,
         weight_attr=ParamAttr(learning_rate=lr_mult),
         bias_attr=False,
         data_format=data_format)
     self.bn = BatchNorm(
         num_filters,
         param_attr=ParamAttr(learning_rate=lr_mult),
         bias_attr=ParamAttr(learning_rate=lr_mult),
         data_layout=data_format)
     self.relu = nn.ReLU()
Example #14
    def __init__(self,
                 num_classes=80,
                 width_mult=1.0,
                 depthwise=False,
                 in_channels=[256, 512, 1024],
                 feat_channels=256,
                 fpn_strides=(8, 16, 32),
                 l1_epoch=285,
                 act='silu',
                 assigner=SimOTAAssigner(use_vfl=False),
                 nms='MultiClassNMS',
                 loss_weight={
                     'cls': 1.0,
                     'obj': 1.0,
                     'iou': 5.0,
                     'l1': 1.0
                 }):
        super(YOLOXHead, self).__init__()
        self._dtype = paddle.framework.get_default_dtype()
        self.num_classes = num_classes
        assert len(in_channels) > 0, "in_channels length should be > 0"
        self.in_channels = in_channels
        feat_channels = int(feat_channels * width_mult)
        self.fpn_strides = fpn_strides
        self.l1_epoch = l1_epoch
        self.assigner = assigner
        self.nms = nms
        self.loss_weight = loss_weight
        self.iou_loss = IouLoss(loss_weight=1.0)  # default loss_weight 2.5

        ConvBlock = DWConv if depthwise else BaseConv

        self.stem_conv = nn.LayerList()
        self.conv_cls = nn.LayerList()
        self.conv_reg = nn.LayerList()  # reg [x,y,w,h] + obj
        for in_c in self.in_channels:
            self.stem_conv.append(BaseConv(in_c, feat_channels, 1, 1, act=act))

            self.conv_cls.append(
                nn.Sequential(*[
                    ConvBlock(feat_channels, feat_channels, 3, 1, act=act),
                    ConvBlock(feat_channels, feat_channels, 3, 1, act=act),
                    nn.Conv2D(feat_channels,
                              self.num_classes,
                              1,
                              bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
                ]))

            self.conv_reg.append(
                nn.Sequential(*[
                    ConvBlock(feat_channels, feat_channels, 3, 1, act=act),
                    ConvBlock(feat_channels, feat_channels, 3, 1, act=act),
                    nn.Conv2D(
                        feat_channels,
                        4 + 1,  # reg [x,y,w,h] + obj
                        1,
                        bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
                ]))

        self._init_weights()
Example #15
    def basic_branch(self, num_conv_out_channels, input_ch):
        # the level indexes are defined from fine to coarse,
        # the branch will contain one more part than that of its previous level
        # the sliding step is set to 1
        pyramid_conv_list = nn.LayerList()
        pyramid_fc_list = nn.LayerList()

        idx_levels = 0
        for idx_branches in range(self.num_branches):
            if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
                idx_levels += 1
            if self.used_levels[idx_levels] == 0:
                continue
            pyramid_conv_list.append(
                nn.Sequential(nn.Conv2D(input_ch, num_conv_out_channels, 1),
                              nn.BatchNorm2D(num_conv_out_channels),
                              nn.ReLU()))

        idx_levels = 0
        for idx_branches in range(self.num_branches):
            if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
                idx_levels += 1
            if self.used_levels[idx_levels] == 0:
                continue
            name = "Linear_branch_id_{}".format(idx_branches)
            fc = nn.Linear(in_features=num_conv_out_channels,
                           out_features=self.num_classes,
                           weight_attr=ParamAttr(name=name + "_weights",
                                                 initializer=Normal(
                                                     mean=0., std=0.001)),
                           bias_attr=ParamAttr(name=name + "_bias",
                                               initializer=Constant(value=0.)))
            pyramid_fc_list.append(fc)
        return pyramid_conv_list, pyramid_fc_list
Example #16
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 num_groups=1,
                 act='relu',
                 conv_lr=1.,
                 conv_decay=0.,
                 norm_decay=0.,
                 norm_type='bn',
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.act = act
        self._conv = nn.Conv2D(in_channels,
                               out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               groups=num_groups,
                               weight_attr=ParamAttr(
                                   learning_rate=conv_lr,
                                   initializer=KaimingNormal(),
                                   regularizer=L2Decay(conv_decay)),
                               bias_attr=False)

        param_attr = ParamAttr(regularizer=L2Decay(norm_decay))
        bias_attr = ParamAttr(regularizer=L2Decay(norm_decay))
        if norm_type in ['sync_bn', 'bn']:
            self._batch_norm = nn.BatchNorm2D(out_channels,
                                              weight_attr=param_attr,
                                              bias_attr=bias_attr)
Example #17
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            stride=stride,
                            padding=(kernel_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]

        self._act = act

        self._batch_norm = BatchNorm2D(
            out_channels,
            weight_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(bn_name + "_offset"))
Example #18
    def __init__(self,
                 num_channels: int,
                 filter_size: int,
                 num_filters: int,
                 stride: int,
                 padding: int,
                 channels: int = None,
                 num_groups: int = 1,
                 act: str = 'relu',
                 name: str = None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=num_groups,
            weight_attr=ParamAttr(initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name + "_bn_scale"),
            bias_attr=ParamAttr(name + "_bn_offset"),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")
Example #19
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            weight_attr=ParamAttr(name=name + "/weights"),
            bias_attr=False)
        self._bn = BatchNorm(
            num_channels=output_channels,
            act=act,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=ParamAttr(name=name + "/BatchNorm/gamma"),
            bias_attr=ParamAttr(name=name + "/BatchNorm/beta"),
            moving_mean_name=name + "/BatchNorm/moving_mean",
            moving_variance_name=name + "/BatchNorm/moving_variance")
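Since the activation is fused into BatchNorm here, the forward reduces to a two-call chain; a minimal sketch:

    def forward(self, inputs):
        return self._bn(self._conv(inputs))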
Example #20
 def __init__(self,
              input_channels,
              output_channels,
              filter_size,
              stride=1,
              groups=1,
              act=None,
              name=None):
     super(ConvBNLayer, self).__init__()
     if "downsample" in name:
         conv_name = name + ".0"
     else:
         conv_name = name
     self._conv = Conv2D(in_channels=input_channels,
                         out_channels=output_channels,
                         kernel_size=filter_size,
                         stride=stride,
                         padding=(filter_size - 1) // 2,
                         groups=groups,
                         weight_attr=ParamAttr(name=conv_name + ".weight"),
                         bias_attr=False)
     if "downsample" in name:
         bn_name = name[:9] + "downsample.1"
     else:
         if "conv1" == name:
             bn_name = "bn" + name[-1]
         else:
             bn_name = (name[:10] if name[7:9].isdigit() else
                        name[:9]) + "bn" + name[-1]
     self._bn = BatchNorm(num_channels=output_channels,
                          act=act,
                          param_attr=ParamAttr(name=bn_name + ".weight"),
                          bias_attr=ParamAttr(name=bn_name + ".bias"),
                          moving_mean_name=bn_name + ".running_mean",
                          moving_variance_name=bn_name + ".running_var")
Example #21
    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            groups=1,
            is_vd_mode=False,
            act=None,
            name=None, ):
        super(ConvBNLayer, self).__init__()

        self.is_vd_mode = is_vd_mode
        self._pool2d_avg = nn.AvgPool2D(
            kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self._conv = nn.Conv2D(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            weight_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = nn.BatchNorm(
            out_channels,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #22
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act="leaky",
                 name=None):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              weight_attr=ParamAttr(name=name +
                                                    '.conv.weights'),
                              bias_attr=False)
        bn_name = name + '.bn'
        self.batch_norm = nn.BatchNorm2D(
            ch_out,
            weight_attr=ParamAttr(name=bn_name + '.scale',
                                  regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(name=bn_name + '.offset',
                                regularizer=L2Decay(0.)))

        self.act = act
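The BN carries no activation here, so the stored self.act is applied in forward; a sketch assuming the conventional leaky-ReLU slope of 0.1:

    def forward(self, inputs):
        out = self.batch_norm(self.conv(inputs))
        if self.act == 'leaky':
            out = F.leaky_relu(out, 0.1)  # slope 0.1 is an assumption
        return out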
Example #23
    def __init__(self,
                 anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                          [59, 119], [116, 90], [156, 198], [373, 326]],
                 anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
                 num_classes=80,
                 loss='YOLOv3Loss'):
        super(YOLOv3Head, self).__init__()
        self.num_classes = num_classes
        self.loss = loss

        self.parse_anchor(anchors, anchor_masks)
        self.num_outputs = len(self.anchors)

        self.yolo_outputs = []
        for i in range(len(self.anchors)):
            num_filters = self.num_outputs * (self.num_classes + 5)
            name = 'yolo_output.{}'.format(i)
            yolo_output = self.add_sublayer(
                name,
                nn.Conv2D(in_channels=1024 // (2**i),
                          out_channels=num_filters,
                          kernel_size=1,
                          stride=1,
                          padding=0,
                          weight_attr=ParamAttr(name=name + '.conv.weights'),
                          bias_attr=ParamAttr(name=name + '.conv.bias',
                                              regularizer=L2Decay(0.))))
            self.yolo_outputs.append(yolo_output)
Example #24
    def __init__(self, config=None):
        super(Net, self).__init__(config)
        if self.config['data_format'] in [
                "NC", "NCL", "NCHW", "NCDHW"
        ]:
            param_shape = [self.config['input_shape'][1]]
        else:
            param_shape = [self.config['input_shape'][-1]]
        dtype = self.config['dtype']

        self.mean = self.create_parameter(
            dtype=dtype,
            attr=ParamAttr(initializer=paddle.nn.initializer.Constant(0.0),
                           trainable=False,
                           do_model_average=True),
            shape=param_shape)

        self.variance = self.create_parameter(
            dtype=dtype,
            attr=ParamAttr(initializer=paddle.nn.initializer.Constant(1.0),
                           trainable=False,
                           do_model_average=True),
            shape=param_shape)

        self.weight = self.create_parameter(
            shape=param_shape,
            dtype=dtype,
            default_initializer=paddle.nn.initializer.Constant(1.0))

        self.bias = self.create_parameter(shape=param_shape,
                                          dtype=dtype,
                                          is_bias=True)
Example #25
    def __init__(self,
                 num_channels: int,
                 num_filters: int,
                 filter_size: int,
                 stride: int = 1,
                 groups: int = 1,
                 act: str = None,
                 name: str = None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=(filter_size - 1) // 2,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
Example #26
def initial_type(name: str, use_bias: bool = False):
    param_attr = ParamAttr(name=name + "_weights")
    if use_bias:
        bias_attr = ParamAttr(name=name + "_offset")
    else:
        bias_attr = False
    return param_attr, bias_attr
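A hypothetical call site showing how the returned pair is wired into a conv layer:

param_attr, bias_attr = initial_type(name="stage1_conv", use_bias=True)
conv = nn.Conv2D(3, 64, kernel_size=3, padding=1,
                 weight_attr=param_attr, bias_attr=bias_attr)
# Parameters are created as "stage1_conv_weights" / "stage1_conv_offset".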
Example #27
 def __init__(self,
              in_c,
              out_c,
              filter_size,
              stride,
              padding,
              num_groups=1,
              if_act=True,
              act=None,
              use_cudnn=True,
              name=""):
     super(ConvBNLayer, self).__init__()
     self.if_act = if_act
     self.act = act
     self.conv = Conv2D(in_channels=in_c,
                        out_channels=out_c,
                        kernel_size=filter_size,
                        stride=stride,
                        padding=padding,
                        groups=num_groups,
                        weight_attr=ParamAttr(name=name + "_weights"),
                        bias_attr=False)
     self.bn = BatchNorm(num_channels=out_c,
                         act=None,
                         param_attr=ParamAttr(name=name + "_bn_scale",
                                              regularizer=L2Decay(0.0)),
                         bias_attr=ParamAttr(name=name + "_bn_offset",
                                             regularizer=L2Decay(0.0)),
                         moving_mean_name=name + "_bn_mean",
                         moving_variance_name=name + "_bn_variance")
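Note the BN is built with act=None, so activation happens in forward when if_act is set; a sketch assuming the relu/hardswish pair common in MobileNetV3 code:

    def forward(self, x):
        x = self.bn(self.conv(x))
        if self.if_act:
            if self.act == 'relu':
                x = F.relu(x)
            elif self.act == 'hardswish':  # act name is an assumption
                x = F.hardswish(x)
        return x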
Example #28
 def __init__(self, ch_in, ch_out=128, num_classes=80, conv_num=2):
     super(HMHead, self).__init__()
     head_conv = nn.Sequential()
     for i in range(conv_num):
         name = 'conv.{}'.format(i)
         head_conv.add_sublayer(
             name,
             nn.Conv2D(in_channels=ch_in if i == 0 else ch_out,
                       out_channels=ch_out,
                       kernel_size=3,
                       padding=1,
                       weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
         head_conv.add_sublayer(name + '.act', nn.ReLU())
     self.feat = self.add_sublayer('hm_feat', head_conv)
     bias_init = float(-np.log((1 - 0.01) / 0.01))
     self.head = self.add_sublayer(
         'hm_head',
         nn.Conv2D(in_channels=ch_out,
                   out_channels=num_classes,
                   kernel_size=1,
                   weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
                   bias_attr=ParamAttr(learning_rate=2.,
                                       regularizer=L2Decay(0.),
                                       initializer=Constant(bias_init))))
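The head bias is initialized to the focal-loss prior, so the initial heatmap activation sits near 0.01; a quick numeric check:

import numpy as np

bias_init = float(-np.log((1 - 0.01) / 0.01))    # about -4.595
print(1.0 / (1.0 + np.exp(-bias_init)))          # sigmoid(bias_init) ~= 0.01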
Example #29
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 pad=0,
                 groups=1,
                 act="relu",
                 name=None):
        super(BNACConvLayer, self).__init__()

        self._batch_norm = BatchNorm(
            num_channels,
            act=act,
            param_attr=ParamAttr(name=name + '_bn_scale'),
            bias_attr=ParamAttr(name + '_bn_offset'),
            moving_mean_name=name + '_bn_mean',
            moving_variance_name=name + '_bn_variance')

        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=pad,
                            groups=groups,
                            weight_attr=ParamAttr(name=name + "_weights"),
                            bias_attr=False)
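As the name suggests, this is a pre-activation block (BN and activation before the conv); a minimal forward sketch:

    def forward(self, x):
        return self._conv(self._batch_norm(x))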
Example #30
    def __init__(self,
                 num_channels: int,
                 filter_size: int,
                 num_filters: int,
                 stride: int,
                 padding: int,
                 channels: int = None,
                 num_groups: int = 1,
                 if_act: bool = True,
                 act: str = 'relu',
                 name: str = None):
        super(ConvBNLayer, self).__init__()
        self._if_act = if_act
        assert act in ['relu', 'swish'], \
            "supported acts are {}, but your act is {}".format(
                ['relu', 'swish'], act)
        self._act = act
        self._conv = Conv2D(in_channels=num_channels,
                            out_channels=num_filters,
                            kernel_size=filter_size,
                            stride=stride,
                            padding=padding,
                            groups=num_groups,
                            weight_attr=ParamAttr(initializer=MSRA(),
                                                  name=name + "_weights"),
                            bias_attr=False)

        self._batch_norm = BatchNorm(
            num_filters,
            param_attr=ParamAttr(name=name + "_bn_scale"),
            bias_attr=ParamAttr(name=name + "_bn_offset"),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")
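The assert restricts act to relu or swish and the BN carries no activation, so a forward sketch follows directly (assuming paddle.nn.functional as F):

    def forward(self, x):
        x = self._batch_norm(self._conv(x))
        if self._if_act:
            x = F.relu(x) if self._act == 'relu' else F.swish(x)
        return x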