Example #1
import math

import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn import Conv2D, Conv2DTranspose, ReLU, Sequential
from paddle.nn.initializer import KaimingNormal, Normal
from paddle.regularizer import L2Decay


class ShortCut(nn.Layer):
    def __init__(self, layer_num, ch_out, name=None):
        super(ShortCut, self).__init__()
        # Stack of 3x3 convolutions; the first conv reduces the channel count
        # from ch_in = 2 * ch_out down to ch_out.
        shortcut_conv = Sequential()
        ch_in = ch_out * 2
        for i in range(layer_num):
            # MSRA-style std for a 3x3 conv with ch_out output channels.
            fan_out = 3 * 3 * ch_out
            std = math.sqrt(2. / fan_out)
            in_channels = ch_in if i == 0 else ch_out
            shortcut_name = name + '.conv.{}'.format(i)
            shortcut_conv.add_sublayer(
                shortcut_name,
                Conv2D(in_channels=in_channels,
                       out_channels=ch_out,
                       kernel_size=3,
                       padding=1,
                       weight_attr=ParamAttr(initializer=Normal(0, std)),
                       bias_attr=ParamAttr(learning_rate=2.,
                                           regularizer=L2Decay(0.))))
            # ReLU after every conv except the last one.
            if i < layer_num - 1:
                shortcut_conv.add_sublayer(shortcut_name + '.act', ReLU())
        self.shortcut = self.add_sublayer('short', shortcut_conv)
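
A minimal constructor-only sketch, assuming PaddlePaddle is installed; the forward method is not part of this excerpt, and layer_num=2, ch_out=64, name='shortcut' are illustrative values, not taken from a real config:

# Illustrative values only; name must be a string, since it is used to build
# the sub-layer names inside the Sequential.
shortcut = ShortCut(layer_num=2, ch_out=64, name='shortcut')
print(shortcut.shortcut)                  # Sequential of Conv2D / ReLU sub-layers
print(len(list(shortcut.parameters())))   # 2 convs -> 4 parameter tensors
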
class MaskFeat(nn.Layer):
    def __init__(self,
                 mask_roi_extractor=None,
                 num_convs=0,
                 feat_in=2048,
                 feat_out=256,
                 mask_num_stages=1,
                 share_bbox_feat=False):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.feat_in = feat_in
        self.feat_out = feat_out
        self.mask_roi_extractor = mask_roi_extractor
        self.mask_num_stages = mask_num_stages
        self.share_bbox_feat = share_bbox_feat
        self.upsample_module = []
        # Fan-in values for Kaiming initialization of the 3x3 convs and the
        # 2x2 transposed conv.
        fan_conv = feat_out * 3 * 3
        fan_deconv = feat_out * 2 * 2
        for i in range(self.mask_num_stages):
            name = 'stage_{}'.format(i)
            mask_conv = Sequential()
            # Optional stack of 3x3 conv + ReLU blocks ahead of the upsampling.
            for j in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(j + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    Conv2D(in_channels=feat_in if j == 0 else feat_out,
                           out_channels=feat_out,
                           kernel_size=3,
                           padding=1,
                           weight_attr=ParamAttr(initializer=KaimingNormal(
                               fan_in=fan_conv)),
                           bias_attr=ParamAttr(learning_rate=2.,
                                               regularizer=L2Decay(0.))))
                mask_conv.add_sublayer(conv_name + 'act', ReLU())
            # 2x upsampling via a transposed convolution, followed by ReLU.
            # Note: the deconv reads self.feat_in channels while the convs
            # above output feat_out, so with num_convs > 0 the shapes only
            # match when feat_in == feat_out.
            mask_conv.add_sublayer(
                'conv5_mask',
                Conv2DTranspose(
                    in_channels=self.feat_in,
                    out_channels=self.feat_out,
                    kernel_size=2,
                    stride=2,
                    weight_attr=ParamAttr(initializer=KaimingNormal(
                        fan_in=fan_deconv)),
                    bias_attr=ParamAttr(learning_rate=2.,
                                        regularizer=L2Decay(0.))))
            mask_conv.add_sublayer('conv5_mask' + 'act', ReLU())
            # Register the per-stage Sequential and keep a handle for forward.
            upsample = self.add_sublayer(name, mask_conv)
            self.upsample_module.append(upsample)
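
A corresponding constructor-only sketch for MaskFeat, assuming the setting where feat_in == feat_out; num_convs=4 and the 256-channel widths are illustrative values, not taken from the excerpt:

# Illustrative values only; with num_convs=4 and feat_in == feat_out == 256
# the conv stack and the transposed conv line up channel-wise.
mask_feat = MaskFeat(num_convs=4, feat_in=256, feat_out=256, mask_num_stages=1)
print(mask_feat.upsample_module[0])  # 4x (Conv2D + ReLU), then Conv2DTranspose + ReLU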