示例#1
0
    def _add_topdown_lateral(self, body_name, body_input, upper_output):
        """Build one FPN merge step: a 1x1 lateral conv on ``body_input``
        plus a 2x nearest-neighbor upsampling of ``upper_output``, returned
        as their element-wise sum."""
        lateral_name = 'fpn_inner_' + body_name + '_lateral'
        topdown_name = 'fpn_topdown_' + body_name
        fan = body_input.shape[1]
        if self.norm_type:
            # Normalized variant: conv and norm layers share the lateral name.
            lateral = ConvNorm(
                body_input,
                self.num_chan,
                1,
                initializer=Xavier(fan_out=fan),
                norm_type=self.norm_type,
                name=lateral_name,
                bn_name=lateral_name)
        else:
            # Plain conv; bias gets 2x learning rate and no weight decay.
            lateral = fluid.layers.conv2d(
                body_input,
                self.num_chan,
                1,
                param_attr=ParamAttr(
                    name=lateral_name + "_w", initializer=Xavier(fan_out=fan)),
                bias_attr=ParamAttr(
                    name=lateral_name + "_b",
                    learning_rate=2.,
                    regularizer=L2Decay(0.)),
                name=lateral_name)
        # Target size is exactly twice the upper output's (H, W), computed at
        # run time so variable input shapes are handled.
        upper_hw = fluid.layers.slice(
            fluid.layers.shape(upper_output), axes=[0], starts=[2], ends=[4])
        target_shape = fluid.layers.cast(upper_hw * 2, dtype='int32')
        target_shape.stop_gradient = True
        topdown = fluid.layers.resize_nearest(
            upper_output, scale=2., actual_shape=target_shape,
            name=topdown_name)
        return lateral + topdown
示例#2
0
 def __call__(self, roi_feat, wb_scalar=1.0, name=''):
     """Run ``num_conv`` 3x3 ConvNorm layers over ``roi_feat``, then a
     relu fully-connected layer of ``self.mlp_dim`` outputs.

     NOTE(review): the loop below rebinds ``name``, so the fc parameter
     names use the last conv layer's name rather than the ``name``
     argument. Preserved as-is for parameter-name / checkpoint
     compatibility — TODO confirm this shadowing is intentional.
     """
     conv = roi_feat
     initializer = MSRA(uniform=False, fan_in=self.conv_dim * 3 * 3)
     for idx in range(self.num_conv):
         name = 'bbox_head_conv' + str(idx)
         conv = ConvNorm(conv,
                         self.conv_dim,
                         3,
                         act='relu',
                         initializer=initializer,
                         norm_type=self.norm_type,
                         freeze_norm=self.freeze_norm,
                         lr_scale=wb_scalar,
                         name=name,
                         norm_name=name)
     # Fan-out for Xavier init is the flattened conv feature size.
     fan = conv.shape[1] * conv.shape[2] * conv.shape[3]
     head_heat = fluid.layers.fc(
         input=conv,
         size=self.mlp_dim,
         act='relu',
         name='fc6' + name,
         param_attr=ParamAttr(name='fc6%s_w' % name,
                              initializer=Xavier(fan_out=fan),
                              learning_rate=wb_scalar),
         bias_attr=ParamAttr(name='fc6%s_b' % name,
                             regularizer=L2Decay(0.),
                             learning_rate=wb_scalar * 2))
     return head_heat
示例#3
0
    def _add_topdown_lateral(self, body_name, body_input, upper_output):
        """Merge one FPN level: element-wise sum of a 1x1 lateral conv of
        ``body_input`` and a 2x nearest-neighbor upsampled
        ``upper_output``."""
        lateral_name = 'fpn_inner_' + body_name + '_lateral'
        topdown_name = 'fpn_topdown_' + body_name
        fan = body_input.shape[1]
        if self.norm_type:
            # Normalized lateral conv; conv and norm share the lateral name.
            lateral = ConvNorm(
                body_input,
                self.num_chan,
                1,
                initializer=Xavier(fan_out=fan),
                norm_type=self.norm_type,
                freeze_norm=self.freeze_norm,
                name=lateral_name,
                norm_name=lateral_name)
        else:
            # Plain conv; bias gets 2x learning rate and no weight decay.
            lateral = fluid.layers.conv2d(
                body_input,
                self.num_chan,
                1,
                param_attr=ParamAttr(
                    name=lateral_name + "_w",
                    initializer=Xavier(fan_out=fan)),
                bias_attr=ParamAttr(
                    name=lateral_name + "_b",
                    learning_rate=2.,
                    regularizer=L2Decay(0.)),
                name=lateral_name)
        topdown = fluid.layers.resize_nearest(
            upper_output, scale=2., name=topdown_name)
        return lateral + topdown
示例#4
0
    def dense_aspp_block(self, input, num_filters1, num_filters2,
                         dilation_rate, dropout_prob, name):
        """One DenseASPP branch: a 1x1 GN-normalized reduction conv, a
        dilated 3x3 conv, and optional dropout when ``dropout_prob > 0``."""
        reduced = ConvNorm(input,
                           num_filters=num_filters1,
                           filter_size=1,
                           stride=1,
                           groups=1,
                           norm_decay=0.,
                           norm_type='gn',
                           norm_groups=self.norm_groups,
                           dilation=dilation_rate,
                           lr_scale=1,
                           freeze_norm=False,
                           act="relu",
                           norm_name=name + "_gn",
                           initializer=None,
                           bias_attr=False,
                           name=name + "_gn")
        # Padding equals the dilation rate so spatial size is preserved.
        out = fluid.layers.conv2d(
            reduced,
            num_filters2,
            filter_size=3,
            padding=dilation_rate,
            dilation=dilation_rate,
            act="relu",
            param_attr=ParamAttr(name=name + "_conv_w"),
            bias_attr=ParamAttr(name=name + "_conv_b"),
        )
        if dropout_prob > 0:
            out = fluid.layers.dropout(out, dropout_prob=dropout_prob)
        return out
示例#5
0
    def _lite_conv(self, x, out_c, act=None, name=None):
        """Lite conv block: depthwise 5x5 -> pointwise-linear 1x1 ->
        pointwise 1x1 -> depthwise-linear 5x5, each followed by BN."""
        # Stage specs; 'tag' is the name suffix, other keys go to ConvNorm.
        # The two pointwise stages deliberately omit ``groups`` (default).
        stages = [
            dict(filter_size=5, num_filters=x.shape[1], groups=x.shape[1],
                 act='relu6', tag='.depthwise'),
            dict(filter_size=1, num_filters=out_c, act=act,
                 tag='.pointwise_linear'),
            dict(filter_size=1, num_filters=out_c, act='relu6',
                 tag='.pointwise'),
            dict(filter_size=5, num_filters=out_c, groups=out_c, act=act,
                 tag='.depthwise_linear'),
        ]
        out = x
        for spec in stages:
            tag = spec.pop('tag')
            out = ConvNorm(input=out,
                           norm_type='bn',
                           initializer=Xavier(),
                           name=name + tag,
                           norm_name=name + tag + '.bn',
                           **spec)
        return out
示例#6
0
 def _deconv_upsample(self, x, out_c, name=None):
     """2x upsample: pointwise ConvNorm, a grouped 4x4 stride-2
     transposed conv with its own batch norm, then a final pointwise
     ConvNorm (all relu6-activated)."""
     squeezed = ConvNorm(input=x,
                         num_filters=out_c,
                         filter_size=1,
                         norm_type='bn',
                         act='relu6',
                         name=name + '.pointwise',
                         initializer=Xavier(),
                         norm_name=name + '.pointwise.bn')
     # groups == out_c makes the transposed conv per-channel; stride 2
     # with padding 1 and a 4x4 kernel doubles H and W.
     upsampled = fluid.layers.conv2d_transpose(
         input=squeezed,
         num_filters=out_c,
         filter_size=4,
         padding=1,
         stride=2,
         groups=out_c,
         param_attr=ParamAttr(name=name + '.deconv.weights',
                              initializer=Xavier()),
         bias_attr=False)
     # Scale/offset carry no weight decay.
     normed = fluid.layers.batch_norm(
         input=upsampled,
         act='relu6',
         param_attr=ParamAttr(name=name + '.deconv.bn.scale',
                              regularizer=L2Decay(0.)),
         bias_attr=ParamAttr(name=name + '.deconv.bn.offset',
                             regularizer=L2Decay(0.)),
         moving_mean_name=name + '.deconv.bn.mean',
         moving_variance_name=name + '.deconv.bn.variance')
     return ConvNorm(input=normed,
                     num_filters=out_c,
                     filter_size=1,
                     norm_type='bn',
                     act='relu6',
                     name=name + '.normal',
                     initializer=Xavier(),
                     norm_name=name + '.normal.bn')
示例#7
0
 def _mask_conv_head(self, roi_feat, num_convs, norm_type):
     """Stack ``num_convs`` 3x3 relu convs on ``roi_feat`` (GN-normalized
     when ``norm_type == 'gn'``, plain conv otherwise), then upsample 2x
     with a transposed conv."""
     use_gn = norm_type == 'gn'
     for i in range(num_convs):
         layer_name = "mask_inter_feat_" + str(i + 1)
         initializer = MSRA(uniform=False, fan_in=self.conv_dim * 3 * 3)
         if use_gn:
             roi_feat = ConvNorm(roi_feat,
                                 self.conv_dim,
                                 3,
                                 act='relu',
                                 dilation=self.dilation,
                                 initializer=initializer,
                                 norm_type=self.norm_type,
                                 name=layer_name,
                                 norm_name=layer_name)
         else:
             # Plain conv path: bias gets 2x lr and no weight decay.
             roi_feat = fluid.layers.conv2d(
                 input=roi_feat,
                 num_filters=self.conv_dim,
                 filter_size=3,
                 padding=1 * self.dilation,
                 act='relu',
                 stride=1,
                 dilation=self.dilation,
                 name=layer_name,
                 param_attr=ParamAttr(name=layer_name + '_w',
                                      initializer=initializer),
                 bias_attr=ParamAttr(name=layer_name + '_b',
                                     learning_rate=2.,
                                     regularizer=L2Decay(0.)))
     # Fan-in of the 2x2 transposed-conv kernel over the conv output.
     fan = roi_feat.shape[1] * 2 * 2
     return fluid.layers.conv2d_transpose(
         input=roi_feat,
         num_filters=self.conv_dim,
         filter_size=2,
         stride=2,
         act='relu',
         param_attr=ParamAttr(name='conv5_mask_w',
                              initializer=MSRA(uniform=False, fan_in=fan)),
         bias_attr=ParamAttr(name='conv5_mask_b',
                             learning_rate=2.,
                             regularizer=L2Decay(0.)))
示例#8
0
 def _mask_conv_head(self,
                     roi_feat,
                     num_convs,
                     norm_type,
                     wb_scalar=1.0,
                     name=''):
     """Stack ``num_convs`` 3x3 relu convs on ``roi_feat``; the GN path
     uses ConvNorm, the plain path a conv2d whose bias lr is scaled by
     ``wb_scalar * self.lr_ratio``. Layer names get a per-head suffix
     unless mask convs are shared."""
     use_gn = norm_type == 'gn'
     for i in range(num_convs):
         layer_name = "mask_inter_feat_" + str(i + 1)
         if not self.share_mask_conv:
             # Unshared weights: disambiguate by the caller-supplied name.
             layer_name += name
         initializer = MSRA(uniform=False, fan_in=self.conv_dim * 3 * 3)
         if use_gn:
             roi_feat = ConvNorm(roi_feat,
                                 self.conv_dim,
                                 3,
                                 act='relu',
                                 dilation=self.dilation,
                                 initializer=initializer,
                                 norm_type=self.norm_type,
                                 name=layer_name,
                                 norm_name=layer_name)
         else:
             roi_feat = fluid.layers.conv2d(
                 input=roi_feat,
                 num_filters=self.conv_dim,
                 filter_size=3,
                 padding=1 * self.dilation,
                 act='relu',
                 stride=1,
                 dilation=self.dilation,
                 name=layer_name,
                 param_attr=ParamAttr(name=layer_name + '_w',
                                      initializer=initializer),
                 bias_attr=ParamAttr(name=layer_name + '_b',
                                     learning_rate=wb_scalar *
                                     self.lr_ratio,
                                     regularizer=L2Decay(0.)))
     return roi_feat
示例#9
0
    def get_output(self, body_dict):
        """
        Add FPN onto backbone.

        Args:
            body_dict(OrderedDict): Dictionary of variables and each element is the
                output of backbone.

        Return:
            fpn_dict(OrderedDict): A dictionary represents the output of FPN with
                their name.
            spatial_scale(list): A list of multiplicative spatial scale factor.

        Note:
            This method mutates ``self.spatial_scale`` in place (it inserts
            scales for the subsampled / extra-conv levels), so repeated
            calls would keep growing it — presumably intended to be called
            once per network build; TODO confirm.
        """
        # Process backbone stages deepest-first (reversed insertion order).
        body_name_list = list(body_dict.keys())[::-1]
        num_backbone_stages = len(body_name_list)
        self.fpn_inner_output = [[] for _ in range(num_backbone_stages)]
        fpn_inner_name = 'fpn_inner_' + body_name_list[0]
        body_input = body_dict[body_name_list[0]]
        fan = body_input.shape[1]
        # 1x1 lateral conv on the topmost (coarsest) backbone feature; the
        # remaining levels are built by _add_topdown_lateral below.
        if self.norm_type:
            initializer = Xavier(fan_out=fan)
            self.fpn_inner_output[0] = ConvNorm(
                body_input,
                self.num_chan,
                1,
                initializer=initializer,
                norm_type=self.norm_type,
                name=fpn_inner_name,
                bn_name=fpn_inner_name)
        else:
            # Plain conv path: bias gets 2x lr and no weight decay.
            self.fpn_inner_output[0] = fluid.layers.conv2d(
                body_input,
                self.num_chan,
                1,
                param_attr=ParamAttr(
                    name=fpn_inner_name + "_w",
                    initializer=Xavier(fan_out=fan)),
                bias_attr=ParamAttr(
                    name=fpn_inner_name + "_b",
                    learning_rate=2.,
                    regularizer=L2Decay(0.)),
                name=fpn_inner_name)
        # Top-down pathway: each level fuses its lateral conv with the
        # upsampled level above it.
        for i in range(1, num_backbone_stages):
            body_name = body_name_list[i]
            body_input = body_dict[body_name]
            top_output = self.fpn_inner_output[i - 1]
            fpn_inner_single = self._add_topdown_lateral(body_name, body_input,
                                                         top_output)
            self.fpn_inner_output[i] = fpn_inner_single
        fpn_dict = {}
        fpn_name_list = []
        # 3x3 smoothing conv on every merged level.
        for i in range(num_backbone_stages):
            fpn_name = 'fpn_' + body_name_list[i]
            fan = self.fpn_inner_output[i].shape[1] * 3 * 3
            if self.norm_type:
                initializer = Xavier(fan_out=fan)
                fpn_output = ConvNorm(
                    self.fpn_inner_output[i],
                    self.num_chan,
                    3,
                    initializer=initializer,
                    norm_type=self.norm_type,
                    name=fpn_name,
                    bn_name=fpn_name)
            else:
                fpn_output = fluid.layers.conv2d(
                    self.fpn_inner_output[i],
                    self.num_chan,
                    filter_size=3,
                    padding=1,
                    param_attr=ParamAttr(
                        name=fpn_name + "_w", initializer=Xavier(fan_out=fan)),
                    bias_attr=ParamAttr(
                        name=fpn_name + "_b",
                        learning_rate=2.,
                        regularizer=L2Decay(0.)),
                    name=fpn_name)
            fpn_dict[fpn_name] = fpn_output
            fpn_name_list.append(fpn_name)
        # Optional coarser level by max-pool subsampling the top FPN output
        # (used when no extra convs are requested but one more level fits).
        if not self.has_extra_convs and self.max_level - self.min_level == len(
                self.spatial_scale):
            body_top_name = fpn_name_list[0]
            body_top_extension = fluid.layers.pool2d(
                fpn_dict[body_top_name],
                1,
                'max',
                pool_stride=2,
                name=body_top_name + '_subsampled_2x')
            fpn_dict[body_top_name + '_subsampled_2x'] = body_top_extension
            fpn_name_list.insert(0, body_top_name + '_subsampled_2x')
            self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)
        # Coarser FPN levels introduced for RetinaNet
        highest_backbone_level = self.min_level + len(self.spatial_scale) - 1
        if self.has_extra_convs and self.max_level > highest_backbone_level:
            fpn_blob = body_dict[body_name_list[0]]
            for i in range(highest_backbone_level + 1, self.max_level + 1):
                fpn_blob_in = fpn_blob
                fpn_name = 'fpn_' + str(i)
                # Relu is applied between consecutive extra convs, but not
                # before the first one (which reads the raw backbone blob).
                if i > highest_backbone_level + 1:
                    fpn_blob_in = fluid.layers.relu(fpn_blob)
                fan = fpn_blob_in.shape[1] * 3 * 3
                fpn_blob = fluid.layers.conv2d(
                    input=fpn_blob_in,
                    num_filters=self.num_chan,
                    filter_size=3,
                    stride=2,
                    padding=1,
                    param_attr=ParamAttr(
                        name=fpn_name + "_w", initializer=Xavier(fan_out=fan)),
                    bias_attr=ParamAttr(
                        name=fpn_name + "_b",
                        learning_rate=2.,
                        regularizer=L2Decay(0.)),
                    name=fpn_name)
                fpn_dict[fpn_name] = fpn_blob
                fpn_name_list.insert(0, fpn_name)
                self.spatial_scale.insert(0, self.spatial_scale[0] * 0.5)
        # Preserve coarsest-to-finest ordering in the returned dict.
        res_dict = OrderedDict([(k, fpn_dict[k]) for k in fpn_name_list])
        return res_dict, self.spatial_scale
示例#10
0
    def dense_aspp(self, input, name=None):
        """DenseASPP over ``input``: dilated branches (rates 3/6/12/18/24)
        built in cascade — each branch reads all previous branch outputs
        concatenated with the input — then all five branch outputs are
        concatenated and reduced by a 1x1 GN conv."""
        dropout0 = 0.1
        d_feature0 = 512
        d_feature1 = 256

        rates = (3, 6, 12, 18, 24)
        branches = []
        feat = input
        for idx, rate in enumerate(rates):
            branch = self.dense_aspp_block(feat,
                                           num_filters1=d_feature0,
                                           num_filters2=d_feature1,
                                           dropout_prob=dropout0,
                                           name=name + '_aspp' + str(rate),
                                           dilation_rate=rate)
            branches.append(branch)
            # Grow the cascade input; the last branch feeds no successor,
            # so no concat is built after it (matches the original graph).
            if idx < len(rates) - 1:
                feat = fluid.layers.concat([branch, feat], axis=1)

        merged = fluid.layers.concat(branches, axis=1)

        # 1x1 GN-normalized reduction back to self.num_chan channels.
        reduced = ConvNorm(merged,
                           num_filters=self.num_chan,
                           filter_size=1,
                           stride=1,
                           groups=1,
                           norm_decay=0.,
                           norm_type='gn',
                           norm_groups=self.norm_groups,
                           dilation=1,
                           lr_scale=1,
                           freeze_norm=False,
                           act="relu",
                           norm_name=name + "_dense_aspp_reduce_gn",
                           initializer=None,
                           bias_attr=False,
                           name=name + "_dense_aspp_reduce_gn")

        return reduced
示例#11
0
    def _fcos_head(self, features, fpn_stride, fpn_scale, is_training=False):
        """
        Build the FCOS head on one FPN level: parallel cls / reg conv
        towers followed by the classification, bbox-regression and
        centerness output convs.

        Args:
            features (Variables): feature map from FPN
            fpn_stride     (int): the stride of current feature map
            fpn_scale           : learnable/level scale multiplied onto the
                raw regression output (type not visible here — presumably a
                per-level scalar variable; confirm at caller)
            is_training   (bool): whether is train or test mode

        Returns:
            Tuple of (cls_logits, bbox_reg, centerness) conv outputs.
        """
        subnet_blob_cls = features
        subnet_blob_reg = features
        in_channles = features.shape[1]
        # Two independent towers of num_convs 3x3 ConvNorm layers.
        for lvl in range(0, self.num_convs):
            conv_cls_name = 'fcos_head_cls_tower_conv_{}'.format(lvl)
            subnet_blob_cls = ConvNorm(
                input=subnet_blob_cls,
                num_filters=in_channles,
                filter_size=3,
                stride=1,
                norm_type=self.norm_type,
                act='relu',
                initializer=Normal(
                    loc=0., scale=0.01),
                bias_attr=True,
                norm_name=conv_cls_name + "_norm",
                name=conv_cls_name)
            conv_reg_name = 'fcos_head_reg_tower_conv_{}'.format(lvl)
            subnet_blob_reg = ConvNorm(
                input=subnet_blob_reg,
                num_filters=in_channles,
                filter_size=3,
                stride=1,
                norm_type=self.norm_type,
                act='relu',
                initializer=Normal(
                    loc=0., scale=0.01),
                bias_attr=True,
                norm_name=conv_reg_name + "_norm",
                name=conv_reg_name)
        conv_cls_name = "fcos_head_cls"
        # Focal-loss style bias init so initial foreground probability
        # equals self.prior_prob.
        bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        cls_logits = fluid.layers.conv2d(
            input=subnet_blob_cls,
            num_filters=self.num_classes,
            filter_size=3,
            stride=1,
            padding=1,
            param_attr=ParamAttr(
                name=conv_cls_name + "_weights",
                initializer=Normal(
                    loc=0., scale=0.01)),
            bias_attr=ParamAttr(
                name=conv_cls_name + "_bias",
                initializer=Constant(value=bias_init_value)),
            name=conv_cls_name)
        conv_reg_name = "fcos_head_reg"
        # 4-channel distance regression (l, t, r, b per FCOS convention —
        # confirm channel order against the loss/decoder).
        bbox_reg = fluid.layers.conv2d(
            input=subnet_blob_reg,
            num_filters=4,
            filter_size=3,
            stride=1,
            padding=1,
            param_attr=ParamAttr(
                name=conv_reg_name + "_weights",
                initializer=Normal(
                    loc=0., scale=0.01)),
            bias_attr=ParamAttr(
                name=conv_reg_name + "_bias", initializer=Constant(value=0)),
            name=conv_reg_name)
        bbox_reg = bbox_reg * fpn_scale
        if self.norm_reg_targets:
            # Normalized targets: clamp to non-negative; at inference the
            # stride is multiplied back to recover absolute distances.
            bbox_reg = fluid.layers.relu(bbox_reg)
            if not is_training:
                bbox_reg = bbox_reg * fpn_stride
        else:
            # Unnormalized targets: exp keeps predictions positive.
            bbox_reg = fluid.layers.exp(bbox_reg)

        conv_centerness_name = "fcos_head_centerness"
        # Centerness branch hangs off the reg or cls tower per config.
        if self.centerness_on_reg:
            subnet_blob_ctn = subnet_blob_reg
        else:
            subnet_blob_ctn = subnet_blob_cls
        centerness = fluid.layers.conv2d(
            input=subnet_blob_ctn,
            num_filters=1,
            filter_size=3,
            stride=1,
            padding=1,
            param_attr=ParamAttr(
                name=conv_centerness_name + "_weights",
                initializer=Normal(
                    loc=0., scale=0.01)),
            bias_attr=ParamAttr(
                name=conv_centerness_name + "_bias",
                initializer=Constant(value=0)),
            name=conv_centerness_name)
        return cls_logits, bbox_reg, centerness