def __init__(self, ch_in, ch_out, norm_type='bn'):
    """Upsampling block: 1x1 ConvNormLayer -> depthwise 4x4
    Conv2DTranspose (stride 2) -> batch norm -> 1x1 ConvNormLayer,
    with ReLU6 after each stage.

    Args:
        ch_in (int): input channel count.
        ch_out (int): output channel count of every stage.
        norm_type (str): normalization type forwarded to the norm layers.
    """
    super(DeConv, self).__init__()
    self.deconv = nn.Sequential()
    # 1x1 projection in front of the upsampling conv.
    proj_in = ConvNormLayer(ch_in=ch_in,
                            ch_out=ch_out,
                            stride=1,
                            filter_size=1,
                            norm_type=norm_type,
                            initializer=XavierUniform())
    # Depthwise (groups=ch_out) transposed conv, no bias; stride 2 with
    # kernel 4 / padding 1 doubles the spatial resolution.
    upsample = nn.Conv2DTranspose(
        in_channels=ch_out,
        out_channels=ch_out,
        kernel_size=4,
        padding=1,
        stride=2,
        groups=ch_out,
        weight_attr=ParamAttr(initializer=XavierUniform()),
        bias_attr=False)
    norm = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
    # Trailing 1x1 projection.
    proj_out = ConvNormLayer(ch_in=ch_out,
                             ch_out=ch_out,
                             stride=1,
                             filter_size=1,
                             norm_type=norm_type,
                             initializer=XavierUniform())

    # Register sublayers in the same order (and with the same names)
    # as the sequential pipeline executes them.
    for layer_name, layer in (('conv1', proj_in),
                              ('relu6_1', nn.ReLU6()),
                              ('conv2', upsample),
                              ('bn', norm),
                              ('relu6_2', nn.ReLU6()),
                              ('conv3', proj_out),
                              ('relu6_3', nn.ReLU6())):
        self.deconv.add_sublayer(layer_name, layer)
# Example 2
 def __init__(self, in_dim=256, mlp_dim=1024, resolution=7, num_stages=1):
     """Multi-stage two-FC box head: one fc6 -> ReLU -> fc7 -> ReLU
     stack per cascade stage.

     Args:
         in_dim (int): channels of the pooled RoI feature.
         mlp_dim (int): width of both hidden fc layers.
         resolution (int): spatial size of the pooled RoI feature.
         num_stages (int): number of stages; one fc pair is built each.
     """
     super(TwoFCHead, self).__init__()
     self.in_dim = in_dim
     self.mlp_dim = mlp_dim
     self.num_stages = num_stages
     # Flattened RoI feature size; also the Xavier fan_out for fc6.
     flat_dim = in_dim * resolution * resolution
     self.fc6_list = []
     self.fc6_relu_list = []
     self.fc7_list = []
     self.fc7_relu_list = []

     def _fc(sub_name, n_in, n_out, init):
         # Linear layer with boosted-lr, non-decayed bias, registered
         # under `sub_name` so checkpoint weight names stay stable.
         return self.add_sublayer(
             sub_name,
             nn.Linear(n_in,
                       n_out,
                       weight_attr=ParamAttr(initializer=init),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))

     for stage in range(num_stages):
         fc6_name = 'fc6_{}'.format(stage)
         fc7_name = 'fc7_{}'.format(stage)
         fc6 = _fc(fc6_name, flat_dim, mlp_dim,
                   XavierUniform(fan_out=flat_dim))
         fc6_relu = self.add_sublayer(fc6_name + 'act', ReLU())
         fc7 = _fc(fc7_name, mlp_dim, mlp_dim, XavierUniform())
         fc7_relu = self.add_sublayer(fc7_name + 'act', ReLU())
         self.fc6_list.append(fc6)
         self.fc6_relu_list.append(fc6_relu)
         self.fc7_list.append(fc7)
         self.fc7_relu_list.append(fc7_relu)
# Example 3
    def __init__(self, in_channel=256, out_channel=1024, resolution=7):
        """Two-FC box head: flattened RoI feature -> fc6 -> fc7.

        Args:
            in_channel (int): channels of the pooled RoI feature.
            out_channel (int): width of both fc layers.
            resolution (int): spatial size of the pooled RoI feature.
        """
        super(TwoFCHead, self).__init__()
        self.in_channel = in_channel
        self.out_channel = out_channel
        # Flattened input size doubles as the Xavier fan_out for fc6.
        flat_dim = in_channel * resolution * resolution
        self.fc6 = nn.Linear(
            flat_dim,
            out_channel,
            weight_attr=paddle.ParamAttr(
                initializer=XavierUniform(fan_out=flat_dim)))
        self.fc7 = nn.Linear(
            out_channel,
            out_channel,
            weight_attr=paddle.ParamAttr(initializer=XavierUniform()))
# Example 4
    def __init__(self, in_dim=256, mlp_dim=1024, resolution=7):
        """Two-FC box head: flattened RoI feature -> fc6 -> fc7.

        Args:
            in_dim (int): channels of the pooled RoI feature.
            mlp_dim (int): width of both fc layers.
            resolution (int): spatial size of the pooled RoI feature.
        """
        super(TwoFCHead, self).__init__()
        self.in_dim = in_dim
        self.mlp_dim = mlp_dim
        # Flattened input size doubles as the Xavier fan_out for fc6.
        flat_dim = in_dim * resolution * resolution
        self.fc6 = nn.Linear(
            flat_dim,
            mlp_dim,
            weight_attr=paddle.ParamAttr(
                initializer=XavierUniform(fan_out=flat_dim)))
        self.fc7 = nn.Linear(
            mlp_dim,
            mlp_dim,
            weight_attr=paddle.ParamAttr(initializer=XavierUniform()))
# Example 5
    def __init__(self,
                 in_channels,
                 out_channel,
                 min_level=0,
                 max_level=4,
                 spatial_scale=[0.25, 0.125, 0.0625, 0.03125]):
        """Build FPN lateral (1x1) and output (3x3) convs for levels
        [min_level, max_level).

        Args:
            in_channels (list[int]): backbone stage channels, indexed by
                level.
            out_channel (int): channel count of every FPN map.
            min_level (int): first backbone level to build convs for.
            max_level (int): end of the level range (exclusive).
            spatial_scale (list[float]): stride reciprocals of the input
                maps. NOTE(review): mutable default list is shared across
                calls; safe only if callers never mutate it in place.
        """
        super(FPN, self).__init__()
        self.lateral_convs = []
        self.fpn_convs = []
        # Fan-out of a 3x3 conv producing out_channel maps; used as the
        # Xavier fan_out for every 3x3 output conv below.
        fan = out_channel * 3 * 3

        for i in range(min_level, max_level):
            # Sublayer names follow ResNet stage naming (res2..res5);
            # they determine the registered parameter names.
            if i == 3:
                lateral_name = 'fpn_inner_res5_sum'
            else:
                lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
            in_c = in_channels[i]
            # 1x1 lateral conv projects the backbone feature to
            # out_channel; bias gets 2x lr and no weight decay.
            lateral = self.add_sublayer(
                lateral_name,
                Conv2D(
                    in_channels=in_c,
                    out_channels=out_channel,
                    kernel_size=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=in_c)),
                    bias_attr=ParamAttr(
                        learning_rate=2., regularizer=L2Decay(0.))))
            self.lateral_convs.append(lateral)

            # 3x3 output conv smooths the merged feature map.
            fpn_name = 'fpn_res{}_sum'.format(i + 2)
            fpn_conv = self.add_sublayer(
                fpn_name,
                Conv2D(
                    in_channels=out_channel,
                    out_channels=out_channel,
                    kernel_size=3,
                    padding=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=fan)),
                    bias_attr=ParamAttr(
                        learning_rate=2., regularizer=L2Decay(0.))))
            self.fpn_convs.append(fpn_conv)

        self.min_level = min_level
        self.max_level = max_level
        self.spatial_scale = spatial_scale
# Example 6
 def __init__(self,
              in_channels,
              out_channels,
              stride=1,
              with_act=True,
              norm_type='sync_bn',
              name=None):
     """Lite conv block: depthwise 5x5 -> pointwise 1x1 -> pointwise
     1x1 -> depthwise 5x5, with ReLU6 activations in between.

     Args:
         in_channels (int): input channel count.
         out_channels (int): output channel count.
         stride (int): stride passed to every ConvNormLayer.
             NOTE(review): the same stride is applied to all four convs,
             so any stride > 1 downsamples four times over the block —
             confirm callers only rely on stride=1 or intend that.
         with_act (bool): when True, add ReLU6 after conv2 and conv4;
             the activations after conv1 and conv3 are unconditional.
         norm_type (str): normalization type for ConvNormLayer.
         name (str): unused here; kept for interface compatibility.
     """
     super(LiteConv, self).__init__()
     self.lite_conv = nn.Sequential()
     # Depthwise 5x5 (groups == channels) on the input.
     conv1 = ConvNormLayer(in_channels,
                           in_channels,
                           filter_size=5,
                           stride=stride,
                           groups=in_channels,
                           norm_type=norm_type,
                           initializer=XavierUniform())
     # Pointwise 1x1 changing channel count.
     conv2 = ConvNormLayer(in_channels,
                           out_channels,
                           filter_size=1,
                           stride=stride,
                           norm_type=norm_type,
                           initializer=XavierUniform())
     # Pointwise 1x1 on the output channels.
     conv3 = ConvNormLayer(out_channels,
                           out_channels,
                           filter_size=1,
                           stride=stride,
                           norm_type=norm_type,
                           initializer=XavierUniform())
     # Depthwise 5x5 on the output channels.
     conv4 = ConvNormLayer(out_channels,
                           out_channels,
                           filter_size=5,
                           stride=stride,
                           groups=out_channels,
                           norm_type=norm_type,
                           initializer=XavierUniform())
     # (Removed unused local `conv_list`; the layers are registered
     # directly below in execution order.)
     self.lite_conv.add_sublayer('conv1', conv1)
     self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())
     self.lite_conv.add_sublayer('conv2', conv2)
     if with_act:
         self.lite_conv.add_sublayer('relu6_2', nn.ReLU6())
     self.lite_conv.add_sublayer('conv3', conv3)
     self.lite_conv.add_sublayer('relu6_3', nn.ReLU6())
     self.lite_conv.add_sublayer('conv4', conv4)
     if with_act:
         self.lite_conv.add_sublayer('relu6_4', nn.ReLU6())
# Example 7
 def __init__(self, feature_dim, class_dim, s=64.0, m=0.50):
     """Angular-margin classification head.

     Args:
         feature_dim (int): dimension of the input embedding.
         class_dim (int): number of classes.
         s (float): logit scale factor.
         m (float): additive angular margin in radians.
     """
     super(ArcNet, self).__init__()
     # Learnable class-weight matrix, Xavier-initialized.
     self.weight = paddle.create_parameter([feature_dim, class_dim],
                                           dtype='float32',
                                           attr=XavierUniform())
     self.class_dim = class_dim
     self.s = s
     self.m = m
     # Precompute margin constants:
     # cos(theta + m) = cos(theta)*cos_m - sin(theta)*sin_m.
     self.cos_m, self.sin_m = math.cos(m), math.sin(m)
     # Angle threshold past which theta + m would exceed pi.
     self.threshold = math.cos(math.pi - m)
     # Fallback penalty term used beyond the threshold.
     self.mm = self.sin_m * m
# Example 8
    def __init__(self,
                 in_dim=256,
                 num_convs=4,
                 conv_dim=256,
                 mlp_dim=1024,
                 resolution=7,
                 norm_type='gn',
                 freeze_norm=False,
                 stage_name=''):
        """Box head with `num_convs` 3x3 ConvNormLayers followed by one
        fully-connected layer (fc6).

        Args:
            in_dim (int): channels of the pooled RoI feature.
            num_convs (int): number of 3x3 conv+norm layers.
            conv_dim (int): channel count of every conv layer.
            mlp_dim (int): width of the fc6 layer.
            resolution (int): spatial size of the pooled RoI feature.
            norm_type (str): normalization type for the conv layers.
            freeze_norm (bool): freeze norm parameters when True.
            stage_name (str): prefix for sublayer names (cascade stages).
        """
        super(XConvNormHead, self).__init__()
        self.in_dim = in_dim
        self.num_convs = num_convs
        self.conv_dim = conv_dim
        self.mlp_dim = mlp_dim
        self.norm_type = norm_type
        self.freeze_norm = freeze_norm

        self.bbox_head_convs = []
        # Kaiming init sized to a 3x3 conv with conv_dim filters.
        conv_init = KaimingNormal(fan_in=conv_dim * 3 * 3)
        for idx in range(num_convs):
            conv_name = stage_name + 'bbox_head_conv{}'.format(idx)
            # The first conv consumes the RoI feature; later ones chain.
            head_conv = self.add_sublayer(
                conv_name,
                ConvNormLayer(ch_in=in_dim if idx == 0 else conv_dim,
                              ch_out=conv_dim,
                              filter_size=3,
                              stride=1,
                              norm_type=self.norm_type,
                              norm_name=conv_name + '_norm',
                              freeze_norm=self.freeze_norm,
                              initializer=conv_init,
                              name=conv_name))
            self.bbox_head_convs.append(head_conv)

        # Flattened conv output size; also the Xavier fan_out for fc6.
        fc_in = conv_dim * resolution * resolution
        self.fc6 = nn.Linear(fc_in,
                             mlp_dim,
                             weight_attr=paddle.ParamAttr(
                                 initializer=XavierUniform(fan_out=fc_in)),
                             bias_attr=paddle.ParamAttr(
                                 learning_rate=2., regularizer=L2Decay(0.)))
# Example 9
    def __init__(self,
                 in_channels,
                 out_channel,
                 spatial_scales=[0.25, 0.125, 0.0625, 0.03125],
                 has_extra_convs=False,
                 extra_stage=1,
                 use_c5=True,
                 norm_type=None,
                 norm_decay=0.,
                 freeze_norm=False,
                 relu_before_extra_convs=True):
        """Build FPN lateral/output convs, optionally group-normalized,
        plus extra stride-2 convs for levels above the backbone.

        Args:
            in_channels (list[int]): channels of the used backbone
                stages, highest-resolution first.
            out_channel (int): channel count of every FPN map.
            spatial_scales (list[float]): stride reciprocals of the
                inputs; extended by /2 per extra stage. NOTE(review):
                mutable default list — it is rebound (not mutated)
                below, so sharing across calls is harmless here.
            has_extra_convs (bool): build stride-2 convs for the extra
                levels instead of plain downsampling.
            extra_stage (int): number of levels above the backbone.
            use_c5 (bool): first extra conv reads the raw C5 feature
                instead of the FPN output.
            norm_type (str|None): 'gn' selects ConvNormLayer; anything
                else uses a plain Conv2D.
            norm_decay (float): weight decay for norm parameters.
            freeze_norm (bool): freeze norm parameters when True.
            relu_before_extra_convs (bool): stored for use by forward.
        """
        super(FPN, self).__init__()
        self.out_channel = out_channel
        for s in range(extra_stage):
            spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
        self.spatial_scales = spatial_scales
        self.has_extra_convs = has_extra_convs
        self.extra_stage = extra_stage
        self.use_c5 = use_c5
        self.relu_before_extra_convs = relu_before_extra_convs
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        self.freeze_norm = freeze_norm

        self.lateral_convs = []
        self.fpn_convs = []
        # Xavier fan_out for every 3x3 conv producing out_channel maps.
        fan = out_channel * 3 * 3

        # stage index 0,1,2,3 stands for res2,res3,res4,res5 on ResNet Backbone
        # 0 <= st_stage < ed_stage <= 3
        st_stage = 4 - len(in_channels)
        ed_stage = st_stage + len(in_channels) - 1
        for i in range(st_stage, ed_stage + 1):
            # Sublayer names (and hence parameter names) follow ResNet
            # stage naming: res2..res5.
            if i == 3:
                lateral_name = 'fpn_inner_res5_sum'
            else:
                lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
            in_c = in_channels[i - st_stage]
            # 1x1 lateral conv; GN variant wraps it in ConvNormLayer.
            if self.norm_type == 'gn':
                lateral = self.add_sublayer(
                    lateral_name,
                    ConvNormLayer(ch_in=in_c,
                                  ch_out=out_channel,
                                  filter_size=1,
                                  stride=1,
                                  norm_type=self.norm_type,
                                  norm_decay=self.norm_decay,
                                  norm_name=lateral_name + '_norm',
                                  freeze_norm=self.freeze_norm,
                                  initializer=XavierUniform(fan_out=in_c),
                                  name=lateral_name))
            else:
                lateral = self.add_sublayer(
                    lateral_name,
                    nn.Conv2D(in_channels=in_c,
                              out_channels=out_channel,
                              kernel_size=1,
                              weight_attr=ParamAttr(initializer=XavierUniform(
                                  fan_out=in_c))))
            self.lateral_convs.append(lateral)

            # 3x3 output conv smoothing the merged feature.
            fpn_name = 'fpn_res{}_sum'.format(i + 2)
            if self.norm_type == 'gn':
                fpn_conv = self.add_sublayer(
                    fpn_name,
                    ConvNormLayer(ch_in=out_channel,
                                  ch_out=out_channel,
                                  filter_size=3,
                                  stride=1,
                                  norm_type=self.norm_type,
                                  norm_decay=self.norm_decay,
                                  norm_name=fpn_name + '_norm',
                                  freeze_norm=self.freeze_norm,
                                  initializer=XavierUniform(fan_out=fan),
                                  name=fpn_name))
            else:
                fpn_conv = self.add_sublayer(
                    fpn_name,
                    nn.Conv2D(in_channels=out_channel,
                              out_channels=out_channel,
                              kernel_size=3,
                              padding=1,
                              weight_attr=ParamAttr(initializer=XavierUniform(
                                  fan_out=fan))))
            self.fpn_convs.append(fpn_conv)

        # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
        if self.has_extra_convs:
            for i in range(self.extra_stage):
                lvl = ed_stage + 1 + i
                # First extra level may read C5 directly; the rest chain
                # from the previous FPN output.
                if i == 0 and self.use_c5:
                    in_c = in_channels[-1]
                else:
                    in_c = out_channel
                extra_fpn_name = 'fpn_{}'.format(lvl + 2)
                # Stride-2 3x3 conv produces the next-coarser level.
                if self.norm_type == 'gn':
                    extra_fpn_conv = self.add_sublayer(
                        extra_fpn_name,
                        ConvNormLayer(ch_in=in_c,
                                      ch_out=out_channel,
                                      filter_size=3,
                                      stride=2,
                                      norm_type=self.norm_type,
                                      norm_decay=self.norm_decay,
                                      norm_name=extra_fpn_name + '_norm',
                                      freeze_norm=self.freeze_norm,
                                      initializer=XavierUniform(fan_out=fan),
                                      name=extra_fpn_name))
                else:
                    extra_fpn_conv = self.add_sublayer(
                        extra_fpn_name,
                        nn.Conv2D(in_channels=in_c,
                                  out_channels=out_channel,
                                  kernel_size=3,
                                  stride=2,
                                  padding=1,
                                  weight_attr=ParamAttr(
                                      initializer=XavierUniform(fan_out=fan))))
                self.fpn_convs.append(extra_fpn_conv)
# Example 10
    def __init__(self,
                 in_channels,
                 out_channel,
                 min_level=0,
                 max_level=4,
                 spatial_scale=[0.25, 0.125, 0.0625, 0.03125],
                 has_extra_convs=False,
                 use_c5=True,
                 relu_before_extra_convs=True):
        """Build FPN lateral/output convs for the backbone levels and
        optional stride-2 extra convs up to max_level.

        Args:
            in_channels (list[int]): backbone stage channels, indexed by
                level.
            out_channel (int): channel count of every FPN map.
            min_level (int): lowest pyramid level built.
            max_level (int): highest pyramid level built (inclusive).
            spatial_scale (list[float]): stride reciprocals of the
                backbone inputs; its length sets the number of backbone
                stages. NOTE(review): mutable default list is shared
                across calls; safe only if never mutated in place.
            has_extra_convs (bool): build stride-2 convs for the levels
                above the backbone.
            use_c5 (bool): first extra conv reads the raw top backbone
                feature instead of the FPN output.
            relu_before_extra_convs (bool): stored for use by forward.
        """
        super(FPN, self).__init__()
        self.min_level = min_level
        self.max_level = max_level
        self.spatial_scale = spatial_scale
        self.has_extra_convs = has_extra_convs
        self.use_c5 = use_c5
        self.relu_before_extra_convs = relu_before_extra_convs

        self.lateral_convs = []
        self.fpn_convs = []
        # Xavier fan_out for every 3x3 conv producing out_channel maps.
        fan = out_channel * 3 * 3

        self.num_backbone_stages = len(spatial_scale)
        self.num_outs = self.max_level - self.min_level + 1
        self.highest_backbone_level = self.min_level + self.num_backbone_stages - 1

        for i in range(self.min_level, self.highest_backbone_level + 1):
            # Sublayer names (and hence parameter names) follow ResNet
            # stage naming: res2..res5.
            if i == 3:
                lateral_name = 'fpn_inner_res5_sum'
            else:
                lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
            in_c = in_channels[i]
            # 1x1 lateral conv; bias gets 2x lr and no weight decay.
            lateral = self.add_sublayer(
                lateral_name,
                Conv2D(
                    in_channels=in_c,
                    out_channels=out_channel,
                    kernel_size=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=in_c)),
                    bias_attr=ParamAttr(
                        learning_rate=2., regularizer=L2Decay(0.))))
            self.lateral_convs.append(lateral)

            # 3x3 output conv smoothing the merged feature.
            fpn_name = 'fpn_res{}_sum'.format(i + 2)
            fpn_conv = self.add_sublayer(
                fpn_name,
                Conv2D(
                    in_channels=out_channel,
                    out_channels=out_channel,
                    kernel_size=3,
                    padding=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=fan)),
                    bias_attr=ParamAttr(
                        learning_rate=2., regularizer=L2Decay(0.))))
            self.fpn_convs.append(fpn_conv)

        # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
        if self.has_extra_convs and self.num_outs > self.num_backbone_stages:
            for lvl in range(self.highest_backbone_level + 1, self.max_level + 1):  # P6 P7 ...
                # First extra level may read the top backbone feature;
                # subsequent levels chain from the previous FPN output.
                if lvl == self.highest_backbone_level + 1 and self.use_c5:
                    in_c = in_channels[self.highest_backbone_level]
                else:
                    in_c = out_channel
                extra_fpn_name = 'fpn_{}'.format(lvl + 2)
                # Stride-2 3x3 conv produces the next-coarser level.
                extra_fpn_conv = self.add_sublayer(
                    extra_fpn_name,
                    Conv2D(
                        in_channels=in_c,
                        out_channels=out_channel,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        weight_attr=ParamAttr(
                            initializer=XavierUniform(fan_out=fan)),
                        bias_attr=ParamAttr(
                            learning_rate=2., regularizer=L2Decay(0.))))
                self.fpn_convs.append(extra_fpn_conv)
# Example 11
    def __init__(self,
                 in_channels,
                 out_channel,
                 spatial_scales=[0.25, 0.125, 0.0625, 0.03125],
                 has_extra_convs=False,
                 extra_stage=1,
                 use_c5=True,
                 relu_before_extra_convs=True):
        """Build FPN lateral (1x1) and output (3x3) convs for each
        backbone stage, plus optional stride-2 extra-level convs.

        Args:
            in_channels (list[int]): channels of each backbone stage.
            out_channel (int): channel count of every FPN map.
            spatial_scales (list[float]): stride reciprocals of the
                inputs; extended by /2 per extra stage. NOTE(review):
                mutable default list — it is rebound (not mutated)
                below, so sharing across calls is harmless here.
            has_extra_convs (bool): build stride-2 convs for the extra
                levels instead of plain downsampling.
            extra_stage (int): number of levels above the backbone.
            use_c5 (bool): first extra conv reads the raw top backbone
                feature instead of the FPN output.
            relu_before_extra_convs (bool): stored for use by forward.
        """
        super(FPN, self).__init__()
        self.out_channel = out_channel
        for s in range(extra_stage):
            spatial_scales = spatial_scales + [spatial_scales[-1] / 2.]
        self.spatial_scales = spatial_scales
        self.has_extra_convs = has_extra_convs
        self.extra_stage = extra_stage
        self.use_c5 = use_c5
        self.relu_before_extra_convs = relu_before_extra_convs

        self.lateral_convs = []
        self.fpn_convs = []
        # Xavier fan_out for every 3x3 conv producing out_channel maps.
        fan = out_channel * 3 * 3

        for i in range(len(in_channels)):
            # Sublayer names (and hence parameter names) follow ResNet
            # stage naming: res2..res5.
            if i == 3:
                lateral_name = 'fpn_inner_res5_sum'
            else:
                lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
            in_c = in_channels[i]
            # 1x1 lateral conv projecting to out_channel.
            lateral = self.add_sublayer(
                lateral_name,
                Conv2D(
                    in_channels=in_c,
                    out_channels=out_channel,
                    kernel_size=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=in_c))))
            self.lateral_convs.append(lateral)

            # 3x3 output conv smoothing the merged feature.
            fpn_name = 'fpn_res{}_sum'.format(i + 2)
            fpn_conv = self.add_sublayer(
                fpn_name,
                Conv2D(
                    in_channels=out_channel,
                    out_channels=out_channel,
                    kernel_size=3,
                    padding=1,
                    weight_attr=ParamAttr(
                        initializer=XavierUniform(fan_out=fan))))
            self.fpn_convs.append(fpn_conv)

        # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
        if self.has_extra_convs:
            for lvl in range(self.extra_stage):  # P6 P7 ...
                # First extra level may read the top backbone feature;
                # subsequent levels chain from the previous FPN output.
                if lvl == 0 and self.use_c5:
                    in_c = in_channels[-1]
                else:
                    in_c = out_channel
                extra_fpn_name = 'fpn_{}'.format(lvl + 2)
                # Stride-2 3x3 conv produces the next-coarser level.
                extra_fpn_conv = self.add_sublayer(
                    extra_fpn_name,
                    Conv2D(
                        in_channels=in_c,
                        out_channels=out_channel,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        weight_attr=ParamAttr(
                            initializer=XavierUniform(fan_out=fan))))
                self.fpn_convs.append(extra_fpn_conv)