Example #1
    def test_simple_conv2d(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(name='pixel', shape=[3, 48, 48], dtype='float32')
            layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])

        print(str(program))
Example #3
def proto_net(x):
    x = P.conv2d(x, 256, filter_size=(3, 3), stride=1, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="proto_net.0.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="proto_net.0.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 256, filter_size=(3, 3), stride=1, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="proto_net.2.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="proto_net.2.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 256, filter_size=(3, 3), stride=1, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="proto_net.4.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="proto_net.4.bias"))
    x = P.relu(x)

    x = P.resize_bilinear(x, scale=float(2))
    x = P.relu(x)

    x = P.conv2d(x, 256, filter_size=(3, 3), stride=1, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="proto_net.8.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="proto_net.8.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 32, filter_size=(1, 1), stride=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="proto_net.10.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="proto_net.10.bias"))
    return x
Example #4
def PredictionModule(x,
                     num_priors,
                     num_classes,
                     mask_dim,
                     shared_conv_w,
                     shared_conv_b,
                     shared_bbox_w,
                     shared_bbox_b,
                     shared_conf_w,
                     shared_conf_b,
                     shared_mask_w,
                     shared_mask_b):
    '''
    Adapted from the PredictionModule in the DSSD paper, but changed to 3x3
    convolutions. Three branches predict the bbox, conf and mask coefficients
    respectively.
               x
             / | \
        bbox conf mask
    '''
    x = P.conv2d(x, 256, filter_size=(3, 3), stride=1, padding=1,
                 param_attr=shared_conv_w,
                 bias_attr=shared_conv_b)
    x = P.relu(x)

    bbox_x = x
    conf_x = x
    mask_x = x

    bbox = P.conv2d(bbox_x, num_priors * 4, filter_size=(3, 3), stride=1, padding=1,
                    param_attr=shared_bbox_w,
                    bias_attr=shared_bbox_b)
    bbox = P.transpose(bbox, perm=[0, 2, 3, 1])
    bbox = P.reshape(bbox, (P.shape(bbox)[0], -1, 4))

    conf = P.conv2d(conf_x, num_priors * num_classes, filter_size=(3, 3), stride=1, padding=1,
                    param_attr=shared_conf_w,
                    bias_attr=shared_conf_b)
    conf = P.transpose(conf, perm=[0, 2, 3, 1])
    conf = P.reshape(conf, (P.shape(conf)[0], -1, num_classes))

    mask = P.conv2d(mask_x, num_priors * mask_dim, filter_size=(3, 3), stride=1, padding=1,
                    param_attr=shared_mask_w,
                    bias_attr=shared_mask_b)
    mask = P.transpose(mask, perm=[0, 2, 3, 1])
    mask = P.reshape(mask, (P.shape(mask)[0], -1, mask_dim))
    mask = P.tanh(mask)

    preds = {'loc': bbox, 'conf': conf, 'mask': mask}
    return preds
Example #5
def conv2d(x, filters, filter_size, padding=1, bias=False):
    conv_out = layers.conv2d(input=x,
                             padding=padding,
                             num_filters=filters,
                             filter_size=filter_size,
                             bias_attr=bias)
    return conv_out
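A minimal usage sketch for this wrapper (assumed context: a standard fluid static graph; the input name and shape below are illustrative, not part of the original snippet):

import paddle.fluid as fluid
import paddle.fluid.layers as layers

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = layers.data(name='x', shape=[3, 32, 32], dtype='float32')
    # reuse the conv2d() wrapper defined above
    y = conv2d(x, filters=16, filter_size=3)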
Example #6
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)

        # conditions for a depthwise conv:
        # use_cudnn == False
        # groups == filters
        # num_filters % num_channels == 0
        y = layers.conv2d(x,
                          shape[1],
                          1,
                          groups=shape[1],
                          bias_attr=False,
                          use_cudnn=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)
Example #7
def conv2d_unit(x, filters, kernels, stride, padding, name, is_test,
                trainable):
    x = P.conv2d(input=x,
                 num_filters=filters,
                 filter_size=kernels,
                 stride=stride,
                 padding=padding,
                 act=None,
                 param_attr=ParamAttr(
                     initializer=fluid.initializer.Normal(0.0, 0.01),
                     name=name + ".conv.weights",
                     trainable=trainable),
                 bias_attr=False)
    bn_name = name + ".bn"
    x = P.batch_norm(
        input=x,
        act=None,
        is_test=is_test,
        param_attr=ParamAttr(initializer=fluid.initializer.Constant(1.0),
                             regularizer=L2Decay(0.),
                             trainable=trainable,
                             name=bn_name + '.scale'),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            regularizer=L2Decay(0.),
                            trainable=trainable,
                            name=bn_name + '.offset'),
        moving_mean_name=bn_name + '.mean',
        moving_variance_name=bn_name + '.var')
    x = P.leaky_relu(x, alpha=0.1)
    return x
Example #8
    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride,
                      padding,
                      name=None):
        """Create conv+bn layer"""
        conv = FL.conv2d(input=input,
                         num_filters=num_filters,
                         filter_size=filter_size,
                         stride=stride,
                         padding=padding,
                         groups=1,
                         act=None,
                         param_attr=ParamAttr(name=name + "_weights"),
                         bias_attr=False,
                         name=name + '.conv2d.output.1')

        bn_name = name + ".bn"
        return FL.batch_norm(
            input=conv,
            act=None,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance',
        )
Example #9
    def func(self, place):
        shape = [2, 2, 3, 3]
        eps = 0.005
        dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(input=x,
                          num_filters=2,
                          filter_size=1,
                          padding=[1, 0, 1, 0],
                          bias_attr=False,
                          use_cudnn=True,
                          groups=1,
                          data_format="NHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)
Example #10
def _DBL(input, num_filters, filter_size, padding=1, name=None):
    conv = pfl.conv2d(input=input,
                      num_filters=num_filters,
                      filter_size=filter_size,
                      padding=padding,
                      name=(name + '_conv2d') if name else None)
    bn = pfl.batch_norm(input=conv, name=(name + '_bn') if name else None)
    act = pfl.leaky_relu(bn, name=(name + '_act') if name else None)
    return act
Example #11
def Yolact(backbone_name, inputs, num_classes, mask_dim, num_priors_list, is_test,
           transform=None, input_size=550, use_fast_prep=False):

    if use_fast_prep:
        inputs = fast_preprocess_layer(inputs, input_size, transform.normalize, transform.subtract_means, transform.to_float)

    # When a layer is frozen (trainable=False), BN's running mean and variance
    # still update; only is_test=True keeps them fixed.
    trainable = not is_test
    if backbone_name == 'darknet53':
        backbone_s8, backbone_s16, backbone_s32 = DarkNet53(inputs, is_test, trainable)
    elif backbone_name == 'resnet50':
        backbone_s8, backbone_s16, backbone_s32 = Resnet50(inputs, is_test, trainable, use_dcn=False)
    elif backbone_name == 'resnet101':
        backbone_s8, backbone_s16, backbone_s32 = Resnet101(inputs, is_test, trainable, use_dcn=False)
    elif backbone_name == 'resnet50dcn':
        backbone_s8, backbone_s16, backbone_s32 = Resnet50(inputs, is_test, trainable, use_dcn=True)
    else:
        raise ValueError('unsupported backbone_name: %s' % backbone_name)

    s8, s16, s32, s64, s128 = FPN(backbone_s8, backbone_s16, backbone_s32)

    # 1. Mask prototypes: 32 by default, i.e. the output has 32 channels.
    proto_x = s8
    proto_out = proto_net(proto_x)
    proto_out = P.relu(proto_out)
    proto_out = P.transpose(proto_out, perm=[0, 2, 3, 1])

    # 2. Prediction heads. The four conv layers of the first PredictionModule
    # are shared by all later PredictionModules.
    shared_conv_w = ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="prediction_layers.0.upfeature.0.weight")
    shared_conv_b = ParamAttr(initializer=fluid.initializer.Constant(0.0), name="prediction_layers.0.upfeature.0.bias")
    shared_bbox_w = ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="prediction_layers.0.bbox_layer.weight")
    shared_bbox_b = ParamAttr(initializer=fluid.initializer.Constant(0.0), name="prediction_layers.0.bbox_layer.bias")
    shared_conf_w = ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="prediction_layers.0.conf_layer.weight")
    shared_conf_b = ParamAttr(initializer=fluid.initializer.Constant(0.0), name="prediction_layers.0.conf_layer.bias")
    shared_mask_w = ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="prediction_layers.0.mask_layer.weight")
    shared_mask_b = ParamAttr(initializer=fluid.initializer.Constant(0.0), name="prediction_layers.0.mask_layer.bias")

    pred_outs = {'loc': [], 'conf': [], 'mask': []}
    for i, tensor in enumerate([s8, s16, s32, s64, s128]):
        num_priors = num_priors_list[i]
        # predict bbox, conf and mask
        preds = PredictionModule(tensor, num_priors, num_classes, mask_dim, shared_conv_w, shared_conv_b, shared_bbox_w, shared_bbox_b, shared_conf_w, shared_conf_b, shared_mask_w, shared_mask_b)
        for key, value in preds.items():
            pred_outs[key].append(value)

    for key, value in pred_outs.items():
        pred_outs[key] = P.concat(value, axis=1)
    pred_outs['proto'] = proto_out

    if is_test:   # inference mode
        print('----test----')
        pred_outs['conf'] = P.softmax(pred_outs['conf'])
    else:   # training mode
        print('----train----')
        pred_outs['segm'] = P.conv2d(s8, num_classes-1, filter_size=(1, 1),
                                     param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="semantic_seg_conv.weight"),
                                     bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="semantic_seg_conv.bias"))
    return pred_outs
Example #12
    def forward(self, x):
        if self.f is not None:
            # expand kernel channels
            kernel = self.f.expand(x.size(1), -1, -1, -1)
            x = layers.conv2d(x,
                              kernel,
                              stride=self.stride,
                              padding=int((self.f.shape[2] - 1) / 2),
                              groups=x.shape[1])
            return x
        else:
            return x
Example #13
    def __call__(self, input):
        return layers.conv2d(input=input,
                             num_filters=num_filters,
                             filter_size=filter_size,
                             stride=stride,
                             padding=padding,
                             dilation=dilation,
                             groups=groups,
                             param_attr=self.attr_holder.param_attr,
                             bias_attr=self.attr_holder.bias_attr,
                             use_cudnn=use_cudnn,
                             act=act)
Example #14
def maskiou_net(x, num_class):
    x = P.conv2d(x, 8, filter_size=(3, 3), stride=2, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.0.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.0.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 16, filter_size=(3, 3), stride=2, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.2.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.2.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 32, filter_size=(3, 3), stride=2, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.4.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.4.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 64, filter_size=(3, 3), stride=2, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.6.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.6.bias"))
    x = P.relu(x)

    x = P.conv2d(x, 128, filter_size=(3, 3), stride=2, padding=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.8.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.8.bias"))
    x = P.relu(x)

    x = P.conv2d(x, num_class, filter_size=(1, 1), stride=1, padding=0,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="maskiou_net.10.weight"),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="maskiou_net.10.bias"))
    x = P.relu(x)
    return x
Example #15
    def func(self, place):
        shape = [2, 3, 3, 3]
        eps = 0.005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d(x, 2, 1, padding=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check(
            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
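The func(self, place) methods in Examples #6, #9 and #15 follow Paddle's gradient-checker test pattern. A sketch of the driver that typically invokes them (an assumed pattern mirroring the upstream unit tests, not part of the original snippets):

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        # run the double-grad check on every available device
        for p in places:
            self.func(p)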
Example #16
    def spatio_conv_layer(self, x, Ks, c_in, c_out, name):
        """Spatio convolution layer"""
        _, T, n, _ = x.shape
        if c_in > c_out:
            x_input = fl.conv2d(input=x,
                                num_filters=c_out,
                                filter_size=[1, 1],
                                stride=[1, 1],
                                padding="SAME",
                                data_format="NHWC",
                                param_attr=fluid.ParamAttr(name="%s_conv2d_1" %
                                                           name))
        elif c_in < c_out:
            # if the size of input channel is less than the output,
            # padding x to the same size of output channel.
            pad = fl.fill_constant_batch_size_like(
                input=x,
                shape=[-1, T, n, c_out - c_in],
                dtype="float32",
                value=0.0)
            x_input = fl.concat([x, pad], axis=3)
        else:
            x_input = x

        for i in range(Ks):
            # x_input shape: [B,T, num_nodes, c_out]
            x_input = fl.reshape(x_input, [-1, c_out])

            x_input = self.message_passing(self.gw,
                                           x_input,
                                           name="%s_mp_%d" % (name, i),
                                           norm=self.gw.node_feat["norm"])

            x_input = fl.fc(x_input,
                            size=c_out,
                            bias_attr=False,
                            param_attr=fluid.ParamAttr(name="%s_gcn_fc_%d" %
                                                       (name, i)))

            bias = fluid.layers.create_parameter(shape=[c_out],
                                                 dtype='float32',
                                                 is_bias=True,
                                                 name='%s_gcn_bias_%d' %
                                                 (name, i))
            x_input = fluid.layers.elementwise_add(x_input, bias, act="relu")

            x_input = fl.reshape(x_input, [-1, T, n, c_out])

        return x_input
Example #17
def FPN(s8, s16, s32):
    # y1
    y1 = P.conv2d(s32, 256, filter_size=(1, 1),
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.lat_layers.0.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.lat_layers.0.bias"))

    # y2
    h_m, w_m = P.shape(s16)[2], P.shape(s16)[3]
    x = P.image_resize(y1, out_shape=[h_m, w_m], resample="BILINEAR")
    y2 = P.conv2d(s16, 256, filter_size=(1, 1),
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.lat_layers.1.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.lat_layers.1.bias"))
    y2 = P.elementwise_add(x, y2, act=None)

    # y3
    h_s, w_s = P.shape(s8)[2], P.shape(s8)[3]
    x = P.image_resize(y2, out_shape=[h_s, w_s], resample="BILINEAR")
    y3 = P.conv2d(s8, 256, filter_size=(1, 1),
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.lat_layers.2.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.lat_layers.2.bias"))
    y3 = P.elementwise_add(x, y3, act=None)

    # pred
    y1 = P.conv2d(y1, 256, filter_size=(3, 3), padding=1,
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.pred_layers.0.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.pred_layers.0.bias"))
    y1 = P.relu(y1)
    y2 = P.conv2d(y2, 256, filter_size=(3, 3), padding=1,
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.pred_layers.1.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.pred_layers.1.bias"))
    y2 = P.relu(y2)
    y3 = P.conv2d(y3, 256, filter_size=(3, 3), padding=1,
                  param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.pred_layers.2.weight"),
                  bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.pred_layers.2.bias"))
    y3 = P.relu(y3)

    # downsample y1 two more times
    s64 = P.conv2d(y1, 256, filter_size=(3, 3), stride=2, padding=1,
                   param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.downsample_layers.0.weight"),
                   bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.downsample_layers.0.bias"))
    s128 = P.conv2d(s64, 256, filter_size=(3, 3), stride=2, padding=1,
                    param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name="fpn.downsample_layers.1.weight"),
                    bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0), name="fpn.downsample_layers.1.bias"))
    return y3, y2, y1, s64, s128
Example #18
    def test_nvprof(self):
        if not fluid.core.is_compiled_with_cuda():
            return
        epochs = 8
        dshape = [4, 3, 28, 28]
        data = layers.data(name='data', shape=[3, 28, 28], dtype='float32')
        conv = layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1])

        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        output_file = 'cuda_profiler.txt'
        with profiler.cuda_profiler(output_file, 'csv') as nvprof:
            for i in range(epochs):
                input = np.random.random(dshape).astype('float32')
                exe.run(fluid.default_main_program(), feed={'data': input})
        os.remove(output_file)
Example #20
    def conv_layer(self,
                   input,
                   num_filters,
                   filter_size,
                   stride,
                   padding,
                   name=None):
        """Create conv layer"""
        conv = FL.conv2d(input=input,
                         num_filters=num_filters,
                         filter_size=filter_size,
                         stride=stride,
                         padding=padding,
                         groups=1,
                         act=None,
                         param_attr=ParamAttr(name=name + "_weights"),
                         bias_attr=False,
                         name=name + '.conv2d.output.1')
        return conv
Example #21
    def fully_con_layer(self, x, n, channel, name):
        """Fully connected layer"""
        bt_init = fluid.initializer.ConstantInitializer(value=0.0)
        bt = fl.create_parameter(
            shape=[n, 1],
            dtype="float32",
            attr=fluid.ParamAttr(name="%s_bt" % name,
                                 trainable=True,
                                 initializer=bt_init),
        )
        x_conv = fl.conv2d(input=x,
                           num_filters=1,
                           filter_size=[1, 1],
                           stride=[1, 1],
                           padding="SAME",
                           data_format="NHWC",
                           param_attr=fluid.ParamAttr(name="%s_conv2d" % name))
        x_conv = x_conv + bt
        return x_conv
Example #22
    def net(self, inputs=None):
        if self.inputs is None:
            self.inputs = inputs or fluid.layers.data(
                name=self.name + "_z", shape=[self.z_dim], dtype='float32')

        act = 'relu'
        x = fc(self.inputs, 1024 * 4 * 4, act=act, name=self.name + "_fc_1")
        x = reshape(x, [-1, 1024, 4, 4], name=self.name + "_reshape")

        x = up_sampling_2(x, 512, name=self.name + "_512×8×8")
        x = up_sampling_2(x, 256, name=self.name + "_256×16×16")
        x = up_sampling_2(x, 128, name=self.name + "_128×32×32")
        x = up_sampling_2(x, 64, name=self.name + "_64×64×64")
        x = up_sampling_2(x, 32, name=self.name + "_32×128×128")

        out = conv2d(x,
                     3,
                     3,
                     padding=1,
                     act='tanh',
                     name=self.name + "_conv2d_out")
        return out
Example #23
def CNNCharEmbedding(input, vocab_size, cnn_dim, n_kernals, hidden_dim,
                     dropout_rate, output_dropout, embed_dim):
    """
    CNN generates character embedding.
    Structed as:
    - embed(x)      len_word X hidden_dim
    - Dropout(x)    
    - CNN(x)        len_word X hidden_dim
    - activation(x)
    - pool          hidden_dim
    - fc            embed_dim
    - Dropout.
    Return:
     embedded Tensor shaped like [bsz, len_seq, embed_dim]
    """
    # input.size [batch_size, len_sentence, len_word]
    bsz, len_seq, len_word = input.shape
    emb = fluid.embedding(input, size=[vocab_size, hidden_dim])
    # emb.size [batch_size, len_sentence, len_word, hidden_dim]
    emb = layers.dropout(x=emb, dropout_prob=dropout_rate)
    emb = layers.reshape(x=emb, shape=(bsz * len_seq, 1, len_word, hidden_dim))
    # emb.size [batch_size X len_sentence, 1, len_word, hidden_dim]
    emb = layers.conv2d(input=emb,
                        num_filters=n_kernals,
                        filter_size=(cnn_dim, hidden_dim),
                        padding=(cnn_dim - 1, 0),
                        act='relu')
    # emb.size [bsz X len_seq, n_kernals, len_word, 1]
    emb = layers.transpose(x=emb, perm=[0, 3, 2, 1])
    # emb.size [bsz X len_seq, 1, len_word, n_kernals]
    emb = layers.pool2d(input=emb, pool_size=[len_word, 1], pool_type='max')
    # emb.size [bsz X len_seq, 1, 1, n_kernals]
    emb = layers.fc(input=emb, size=embed_dim, num_flatten_dims=-1, act='tanh')
    # emb.size [bsz X len_seq, 1, 1, embed_dim]
    emb = layers.reshape(x=emb, shape=(bsz, len_seq, embed_dim))
    emb = layers.dropout(x=emb, dropout_prob=output_dropout)
    return emb
Example #24
    def net(self, inputs=None):
        if self.inputs is None:
            self.inputs = inputs or fluid.layers.data(
                name=self.name + "_image",
                shape=self.image_size,
                dtype='float32')

        x = conv2d(self.inputs,
                   32,
                   3,
                   stride=2,
                   padding=1,
                   act='leaky_relu',
                   name=self.name + "_conv2d_1")
        x = dropout(x, 0.25)

        x = down_sampling_2(x, 64, name=self.name + "_64×32*32")
        x = down_sampling_2(x, 128, name=self.name + "_128×16*16")
        x = down_sampling_2(x, 256, name=self.name + "_256×8*8")

        x = flatten(x, name=self.name + "_fc")
        x = fc(x, 1, act="sigmoid")

        return x
Example #25
    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      groups=1,
                      act=None,
                      name=None):
        conv = layers.conv2d(input=input,
                             num_filters=num_filters,
                             filter_size=filter_size,
                             stride=stride,
                             padding=(filter_size - 1) // 2,
                             groups=groups,
                             act=None,
                             param_attr=ParamAttr(name=name + "_weights"),
                             bias_attr=False)
        bn_name = "bn_" + name
        return layers.batch_norm(input=conv,
                                 act=act,
                                 param_attr=ParamAttr(name=bn_name + '_scale'),
                                 bias_attr=ParamAttr(bn_name + '_offset'),
                                 moving_mean_name=bn_name + '_mean',
                                 moving_variance_name=bn_name + '_variance')
Example #26
    def net(self, inputs):
        print(inputs.shape)

        x = conv2d(inputs, 64, 3, padding=1, act='relu')
        x = conv2d(x, 64, 3, padding=1, act='relu')
        x = pool2d(x, 2, pool_stride=2)
        print(x.shape)

        x = conv2d(x, 128, 3, padding=1, act='relu')
        x = conv2d(x, 128, 3, padding=1, act='relu')
        x = pool2d(x, 2, pool_stride=2)
        print(x.shape)

        x = conv2d(x, 256, 3, padding=1, act='relu')
        x = conv2d(x, 256, 3, padding=1, act='relu')
        x = conv2d(x, 256, 3, padding=1, act='relu')
        x = pool2d(x, 2, pool_stride=2)
        print(x.shape)

        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = pool2d(x, 2, pool_stride=2)
        print(x.shape)

        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = conv2d(x, 512, 3, padding=1, act='relu')
        x = pool2d(x, 2, pool_stride=2)
        print(x.shape)

        x = flatten(x)
        x = fc(x, 4096, act='relu')
        x = fc(x, 4096, act='relu')
        out = fc(x, self.class_num)
        print(out.shape)

        return out
Example #27
def up_sampling_2(x, num_filters, name, act='relu'):
    x = pixel_shuffle(x, 2)
    x = conv2d(x, num_filters, 3, padding=1, name=name + "_conv2d_1")
    x = batch_norm(x, act=act, name=name + "_bn")
    return x
Example #28
def YOLOv3(inputs, initial_filters, num_classes, is_test, trainable=True):
    i32 = initial_filters
    i64 = i32 * 2
    i128 = i32 * 4
    i256 = i32 * 8
    i512 = i32 * 16
    i1024 = i32 * 32
    ''' DarkNet53 section: none of the conv layers has a bias (bias_attr=False) '''
    x = conv2d_unit(inputs,
                    i32, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv01',
                    is_test=is_test,
                    trainable=trainable)

    x = conv2d_unit(x,
                    i64, (3, 3),
                    stride=2,
                    padding=1,
                    name='conv02',
                    is_test=is_test,
                    trainable=trainable)
    x = stack_residual_block(x,
                             i32,
                             n=1,
                             conv_start_idx=3,
                             is_test=is_test,
                             trainable=trainable)

    x = conv2d_unit(x,
                    i128, (3, 3),
                    stride=2,
                    padding=1,
                    name='conv05',
                    is_test=is_test,
                    trainable=trainable)
    x = stack_residual_block(x,
                             i64,
                             n=2,
                             conv_start_idx=6,
                             is_test=is_test,
                             trainable=trainable)

    x = conv2d_unit(x,
                    i256, (3, 3),
                    stride=2,
                    padding=1,
                    name='conv10',
                    is_test=is_test,
                    trainable=trainable)
    act11 = stack_residual_block(x,
                                 i128,
                                 n=8,
                                 conv_start_idx=11,
                                 is_test=is_test,
                                 trainable=trainable)

    x = conv2d_unit(act11,
                    i512, (3, 3),
                    stride=2,
                    padding=1,
                    name='conv27',
                    is_test=is_test,
                    trainable=trainable)
    act19 = stack_residual_block(x,
                                 i256,
                                 n=8,
                                 conv_start_idx=28,
                                 is_test=is_test,
                                 trainable=trainable)

    x = conv2d_unit(act19,
                    i1024, (3, 3),
                    stride=2,
                    padding=1,
                    name='conv44',
                    is_test=is_test,
                    trainable=trainable)
    act23 = stack_residual_block(x,
                                 i512,
                                 n=4,
                                 conv_start_idx=45,
                                 is_test=is_test,
                                 trainable=trainable)
    ''' end of the DarkNet53 section; the remaining layers use no residual blocks (stack_residual_block()) '''

    x = conv2d_unit(act23,
                    i512, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv53',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i1024, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv54',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i512, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv55',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i1024, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv56',
                    is_test=is_test,
                    trainable=trainable)
    lkrelu57 = conv2d_unit(x,
                           i512, (1, 1),
                           stride=1,
                           padding=0,
                           name='conv57',
                           is_test=is_test,
                           trainable=trainable)

    x = conv2d_unit(lkrelu57,
                    i1024, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv58',
                    is_test=is_test,
                    trainable=trainable)
    y1 = P.conv2d(
        x,
        3 * (num_classes + 5),
        filter_size=(1, 1),
        param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01),
                             name="conv59.conv.weights"),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            name="conv59.conv.bias"))

    x = conv2d_unit(lkrelu57,
                    i256, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv60',
                    is_test=is_test,
                    trainable=trainable)
    x = P.resize_nearest(x, scale=float(2))
    x = P.concat([x, act19], axis=1)

    x = conv2d_unit(x,
                    i256, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv61',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i512, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv62',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i256, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv63',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i512, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv64',
                    is_test=is_test,
                    trainable=trainable)
    lkrelu64 = conv2d_unit(x,
                           i256, (1, 1),
                           stride=1,
                           padding=0,
                           name='conv65',
                           is_test=is_test,
                           trainable=trainable)

    x = conv2d_unit(lkrelu64,
                    i512, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv66',
                    is_test=is_test,
                    trainable=trainable)
    y2 = P.conv2d(
        x,
        3 * (num_classes + 5),
        filter_size=(1, 1),
        param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01),
                             name="conv67.conv.weights"),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            name="conv67.conv.bias"))

    x = conv2d_unit(lkrelu64,
                    i128, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv68',
                    is_test=is_test,
                    trainable=trainable)
    x = P.resize_nearest(x, scale=float(2))
    x = P.concat([x, act11], axis=1)

    x = conv2d_unit(x,
                    i128, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv69',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i256, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv70',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i128, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv71',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i256, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv72',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i128, (1, 1),
                    stride=1,
                    padding=0,
                    name='conv73',
                    is_test=is_test,
                    trainable=trainable)
    x = conv2d_unit(x,
                    i256, (3, 3),
                    stride=1,
                    padding=1,
                    name='conv74',
                    is_test=is_test,
                    trainable=trainable)
    y3 = P.conv2d(
        x,
        3 * (num_classes + 5),
        filter_size=(1, 1),
        param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01),
                             name="conv75.conv.weights"),
        bias_attr=ParamAttr(initializer=fluid.initializer.Constant(0.0),
                            name="conv75.conv.bias"))

    # equivalent to numpy's transpose(): reorder the axes
    y1 = P.transpose(y1, perm=[0, 2, 3, 1])
    y2 = P.transpose(y2, perm=[0, 2, 3, 1])
    y3 = P.transpose(y3, perm=[0, 2, 3, 1])
    return y1, y2, y3
Example #29
    def temporal_conv_layer(self, x, Kt, c_in, c_out, name, act_func='relu'):
        """Temporal convolution layer"""
        _, T, n, _ = x.shape
        if c_in > c_out:
            x_input = fl.conv2d(input=x,
                                num_filters=c_out,
                                filter_size=[1, 1],
                                stride=[1, 1],
                                padding="SAME",
                                data_format="NHWC",
                                param_attr=fluid.ParamAttr(name="%s_conv2d_1" %
                                                           name))
        elif c_in < c_out:
            # if the size of input channel is less than the output,
            # padding x to the same size of output channel.
            pad = fl.fill_constant_batch_size_like(
                input=x,
                shape=[-1, T, n, c_out - c_in],
                dtype="float32",
                value=0.0)
            x_input = fl.concat([x, pad], axis=3)
        else:
            x_input = x

        #  x_input = x_input[:, Kt - 1:T, :, :]
        if act_func == 'GLU':
            # gated linear unit
            bt_init = fluid.initializer.ConstantInitializer(value=0.0)
            bt = fl.create_parameter(
                shape=[2 * c_out],
                dtype="float32",
                attr=fluid.ParamAttr(name="%s_bt" % name,
                                     trainable=True,
                                     initializer=bt_init),
            )
            x_conv = fl.conv2d(input=x,
                               num_filters=2 * c_out,
                               filter_size=[Kt, 1],
                               stride=[1, 1],
                               padding="SAME",
                               data_format="NHWC",
                               param_attr=fluid.ParamAttr(name="%s_conv2d_wt" %
                                                          name))
            x_conv = x_conv + bt
            return (x_conv[:, :, :, 0:c_out] + x_input) * fl.sigmoid(
                x_conv[:, :, :, -c_out:])
        else:
            bt_init = fluid.initializer.ConstantInitializer(value=0.0)
            bt = fl.create_parameter(
                shape=[c_out],
                dtype="float32",
                attr=fluid.ParamAttr(name="%s_bt" % name,
                                     trainable=True,
                                     initializer=bt_init),
            )
            x_conv = fl.conv2d(input=x,
                               num_filters=c_out,
                               filter_size=[Kt, 1],
                               stride=[1, 1],
                               padding="SAME",
                               data_format="NHWC",
                               param_attr=fluid.ParamAttr(name="%s_conv2d_wt" %
                                                          name))
            x_conv = x_conv + bt
            if act_func == "linear":
                return x_conv
            elif act_func == "sigmoid":
                return fl.sigmoid(x_conv)
            elif act_func == "relu":
                return fl.relu(x_conv + x_input)
            else:
                raise ValueError(
                    f'ERROR: activation function "{act_func}" is not defined.')
Example #30
    def __call__(self, lr):
        x = image_resize(lr, scale=self.scale)
        x = conv2d(x, 64, (9, 9), padding=4, act='relu', name='conv1_1')
        x = conv2d(x, 32, (1, 1), act='relu', name='conv2_1')
        x = conv2d(x, 3, (5, 5), padding=2, name='conv3_1')
        return x
Example #31
# hyper-parameters

batch_size = 128
num_classes = 10
epochs = 12

img_rows = 28
img_cols = 28

# define the model

X = layers.data(name="img", shape=[-1, 1, 28, 28], dtype="float32")
Y = layers.data(name="label", shape=[-1, 1], dtype="int64")

h_conv = layers.conv2d(X, num_filters=32, filter_size=(3, 3), act="relu")
h_conv = layers.conv2d(h_conv, num_filters=64, filter_size=(3, 3), act="relu")
h_pool = layers.pool2d(h_conv, pool_size=(2, 2))
h_dropout = layers.dropout(h_pool, dropout_prob=0.25)
h_flatten = layers.flatten(h_dropout)
h_fc = layers.fc(h_flatten,
                 size=128,
                 act="relu",
                 bias_attr=fluid.param_attr.ParamAttr(name="b_0"))
h_dropout2 = layers.dropout(h_fc, dropout_prob=0.25)
pred = layers.fc(h_dropout2,
                 size=num_classes,
                 act="softmax",
                 bias_attr=fluid.param_attr.ParamAttr(name="b_1"))

loss = layers.reduce_mean(layers.cross_entropy(input=pred, label=Y))
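The snippet above only builds the network and the loss. A minimal training-step sketch (assumed: the usual fluid executor workflow, with random arrays standing in for real MNIST batches):

import numpy as np

opt = fluid.optimizer.Adam(learning_rate=1e-3)
opt.minimize(loss)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

# one illustrative step on random data shaped like the declared inputs
img = np.random.random((batch_size, 1, img_rows, img_cols)).astype('float32')
lbl = np.random.randint(0, num_classes, size=(batch_size, 1)).astype('int64')
loss_val, = exe.run(fluid.default_main_program(),
                    feed={'img': img, 'label': lbl},
                    fetch_list=[loss])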
Example #32
def down_sampling_2(x, num_filters, name, act='leaky_relu'):
    x = conv2d(x, num_filters, 3, stride=2, padding=1, name=name + "_conv2d")
    x = batch_norm(x, act=act, name=name + "_bn")
    x = dropout(x, 0.25, name=name + "_dropout")
    return x