def fpn(self, up_from, up_to):
    """Fuse a coarser feature map (`up_from`) into a finer one (`up_to`),
    FPN top-down style.

    Both inputs are first projected to `up_to`'s channel count with 1x1
    ReLU convs; `up_from` is then upsampled 2x (transposed conv with a
    frozen bilinear kernel, or `resize_bilinear`) and fused with `up_to`
    by element-wise multiplication.

    Args:
        up_from: coarser (lower-resolution) feature map, NCHW.
        up_to: finer (higher-resolution) feature map, NCHW; its channel
            dim (`shape[1]`) sets the output channel count.

    Returns:
        The fused feature map, same spatial size as `up_to`.
    """
    # BUGFIX: original signature omitted `self` even though the body
    # reads `self.use_transposed_conv2d` and `self.is_infer`.
    ch = up_to.shape[1]
    # Biases get 2x learning rate and no weight decay (common detection
    # head convention in this codebase).
    b_attr = ParamAttr(learning_rate=2., regularizer=L2Decay(0.))
    conv1 = fluid.layers.conv2d(
        up_from, ch, 1, act='relu', bias_attr=b_attr)
    if self.use_transposed_conv2d:
        # Frozen (lr=0) bilinear-initialized depthwise transposed conv:
        # behaves as fixed 2x bilinear upsampling, one group per channel.
        w_attr = ParamAttr(
            learning_rate=0.,
            regularizer=L2Decay(0.),
            initializer=Bilinear())
        upsampling = fluid.layers.conv2d_transpose(
            conv1,
            ch,
            output_size=None,
            filter_size=4,
            padding=1,
            stride=2,
            groups=ch,
            param_attr=w_attr,
            bias_attr=False,
            use_cudnn=True)
    else:
        upsampling = fluid.layers.resize_bilinear(
            conv1, out_shape=up_to.shape[2:])
    conv2 = fluid.layers.conv2d(
        up_to, ch, 1, act='relu', bias_attr=b_attr)
    if self.is_infer:
        # At inference the static output size of the transposed conv may
        # exceed conv2's spatial size; crop to match before fusing.
        upsampling = fluid.layers.crop(upsampling, shape=conv2)
    # eltwise mul fusion
    conv_fuse = upsampling * conv2
    return conv_fuse
def fpn(self, up_from, up_to, name, is_sampling=False):
    """FPN-style top-down fusion of `up_from` into `up_to`.

    Projects each input to `up_to`'s channel count via a 1x1 ReLU conv,
    optionally 2x-upsamples the projected `up_from` with a frozen
    bilinear transposed conv, multiplies the two maps element-wise, and
    returns the ReLU of the product.

    Args:
        up_from: coarser feature map, NCHW.
        up_to: finer feature map, NCHW; its channel dim sets the width.
        name: prefix for the created layers' names.
        is_sampling: when True, upsample the `up_from` branch 2x.

    Returns:
        ReLU-activated fused feature map.
    """
    num_channels = up_to.shape[1]

    # Lateral 1x1 projection of the coarse branch; biases learn at 2x
    # rate with no L2 decay.
    lateral = fluid.layers.conv2d(
        up_from,
        num_channels,
        1,
        act='relu',
        bias_attr=ParamAttr(learning_rate=2., regularizer=L2Decay(0.)),
        name=name + "_fpn_conv1")

    if not is_sampling:
        top_down = lateral
    else:
        # Depthwise transposed conv with a frozen bilinear kernel: a
        # fixed, non-learnable 2x bilinear upsampler.
        bilinear_attr = ParamAttr(
            learning_rate=0.,
            regularizer=L2Decay(0.),
            initializer=Bilinear())
        top_down = fluid.layers.conv2d_transpose(
            lateral,
            num_channels,
            output_size=None,
            filter_size=4,
            padding=1,
            stride=2,
            groups=num_channels,
            param_attr=bilinear_attr,
            bias_attr=False,
            use_cudnn=True,
            name=name + "_fpn_upsampling")

    # 1x1 projection of the fine branch, same bias policy.
    refined = fluid.layers.conv2d(
        up_to,
        num_channels,
        1,
        act='relu',
        bias_attr=ParamAttr(learning_rate=2., regularizer=L2Decay(0.)),
        name=name + "_fpn_conv2")

    # Element-wise multiplicative fusion, then ReLU.
    fused = top_down * refined
    return fluid.layers.relu(x=fused)