Example #1
    def __init__(self,
                 num_features,
                 epsilon=1e-5,
                 momentum=0.9,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCHW",
                 name=None):
        super(InstanceNorm, self).__init__()

        if weight_attr == False or bias_attr == False:
            assert weight_attr == bias_attr, "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
        self._epsilon = epsilon
        self._weight_attr = weight_attr
        self._bias_attr = bias_attr

        if weight_attr != False and bias_attr != False:
            self.scale = self.create_parameter(
                attr=self._weight_attr,
                shape=[num_features],
                default_initializer=Constant(1.0),
                is_bias=False)
            self.bias = self.create_parameter(
                attr=self._bias_attr,
                shape=[num_features],
                default_initializer=Constant(0.0),
                is_bias=True)
        else:
            self.scale = None
            self.bias = None
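
A minimal construction sketch for the InstanceNorm subclass above (hedged: it assumes the class inherits from paddle.nn.Layer so that create_parameter is available; the feature count 64 is illustrative):

norm = InstanceNorm(num_features=64)   # learnable scale (init 1.0) and bias (init 0.0)
plain = InstanceNorm(num_features=64, weight_attr=False, bias_attr=False)   # affine parameters skipped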
Example #2
def ReLUConvBN(input,
               C_out,
               kernel_size,
               stride,
               padding,
               name='',
               affine=True):
    relu_a = fluid.layers.relu(input)
    conv2d_a = fluid.layers.conv2d(
        relu_a, C_out, kernel_size, stride, padding, bias_attr=False)
    if affine:
        reluconvbn_out = fluid.layers.batch_norm(
            conv2d_a,
            param_attr=ParamAttr(
                initializer=Constant(1.), name=name + 'op.2.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.), name=name + 'op.2.bias'),
            moving_mean_name=name + 'op.2.running_mean',
            moving_variance_name=name + 'op.2.running_var')
    else:
        reluconvbn_out = fluid.layers.batch_norm(
            conv2d_a,
            param_attr=ParamAttr(
                initializer=Constant(1.),
                learning_rate=0.,
                name=name + 'op.2.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.),
                learning_rate=0.,
                name=name + 'op.2.bias'),
            moving_mean_name=name + 'op.2.running_mean',
            moving_variance_name=name + 'op.2.running_var')
    return reluconvbn_out
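
A hypothetical static-graph call to the ReLUConvBN block above (assumes the PaddlePaddle 1.x fluid API; the input shape and the 'cell0.' name prefix are made up for illustration):

data = fluid.data(name='img', shape=[None, 16, 32, 32], dtype='float32')
out = ReLUConvBN(data, C_out=32, kernel_size=3, stride=1, padding=1, name='cell0.')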
Example #3
def StemConv0(input, C_out):
    conv_a = fluid.layers.conv2d(input,
                                 C_out // 2,
                                 3,
                                 stride=2,
                                 padding=1,
                                 bias_attr=False)
    bn_a = fluid.layers.batch_norm(
        conv_a,
        act='relu',
        param_attr=ParamAttr(initializer=Constant(1.), name='stem0.1.weight'),
        bias_attr=ParamAttr(initializer=Constant(0.), name='stem0.1.bias'),
        moving_mean_name='stem0.1.running_mean',
        moving_variance_name='stem0.1.running_var')

    conv_b = fluid.layers.conv2d(bn_a,
                                 C_out,
                                 3,
                                 stride=2,
                                 padding=1,
                                 bias_attr=False)
    bn_b = fluid.layers.batch_norm(
        conv_b,
        param_attr=ParamAttr(initializer=Constant(1.), name='stem0.3.weight'),
        bias_attr=ParamAttr(initializer=Constant(0.), name='stem0.3.bias'),
        moving_mean_name='stem0.3.running_mean',
        moving_variance_name='stem0.3.running_var')
    return bn_b
Example #4
    def upsample(self, x, out_c, name=None):
        fan_in = x.shape[1] * 3 * 3
        stdv = 1. / math.sqrt(fan_in)
        if self.dcn_upsample:
            conv = DeformConv(x,
                              out_c,
                              3,
                              initializer=Uniform(-stdv, stdv),
                              bias_attr=True,
                              name=name + '.0')
        else:
            conv = fluid.layers.conv2d(
                x,
                out_c,
                3,
                padding=1,
                param_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(learning_rate=2., regularizer=L2Decay(0.)))

        norm_name = name + '.1'
        pattr = ParamAttr(name=norm_name + '.weight', initializer=Constant(1.))
        battr = ParamAttr(name=norm_name + '.bias', initializer=Constant(0.))
        bn = fluid.layers.batch_norm(
            input=conv,
            act='relu',
            param_attr=pattr,
            bias_attr=battr,
            name=norm_name + '.output.1',
            moving_mean_name=norm_name + '.running_mean',
            moving_variance_name=norm_name + '.running_var')
        up = fluid.layers.resize_bilinear(bn,
                                          scale=2,
                                          name=name + '.2.upsample')
        return up
Example #5
 def __init__(self, *args, **kwargs):
     if 'affine' in kwargs:
         affine = kwargs.pop('affine')
     else:
         affine = True
     super().__init__(*args, **kwargs)
     self._use_global_stats = True
     if not affine:
         weight = (self.weight * 0 + 1).detach()
         bias = (self.bias * 0).detach()
         del self._parameters['bias']
         del self._parameters['weight']
         self.weight = weight
         self.bias = bias
     self.weight.stop_gradient = True
     self.bias.stop_gradient = True
     self.accumulated_mean = self.create_parameter(
         shape=[args[0]], default_initializer=Constant(0.0))
     self.accumulated_var = self.create_parameter(
         shape=[args[0]], default_initializer=Constant(0.0))
     self.accumulated_counter = self.create_parameter(
         shape=[1], default_initializer=Constant(1e-12))
     self.accumulated_mean.stop_gradient = True
     self.accumulated_var.stop_gradient = True
     self.accumulated_counter.stop_gradient = True
Example #6
def create_parameter(layers, shape, dtype):
    # use layerhelper to init bias, scale, mean, variance
    helper = LayerHelper("batch_norm", **locals())
    param_name = "batch_norm_" + str(layers)
    scale = helper.create_parameter(attr=fluid.ParamAttr(name=param_name +
                                                         '.w' + '_0'),
                                    shape=[shape],
                                    dtype=dtype,
                                    default_initializer=Constant(1.0))
    scale.stop_gradient = True

    bias = helper.create_parameter(attr=fluid.ParamAttr(name=param_name +
                                                        '.b' + '_0'),
                                   shape=[shape],
                                   dtype=dtype,
                                   is_bias=True)
    bias.stop_gradient = True

    mean = helper.create_parameter(attr=ParamAttr(name=param_name + '.w' +
                                                  '_1',
                                                  initializer=Constant(0.0),
                                                  trainable=False),
                                   shape=[shape],
                                   dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(attr=ParamAttr(
        name=param_name + '.w' + '_2',
        initializer=Constant(1.0),
        trainable=False),
                                       shape=[shape],
                                       dtype=dtype)
    variance.stop_gradient = True

    return scale, bias, mean, variance
Example #7
 def __init__(self, memory, base_name, channel):
     super(BatchNorm, self).__init__()
     self.memory = memory
     self.base_name = base_name
     self.call_count = 0
     self.scale_name = "%s_scale" % base_name
     self.bias_name = "%s_bias" % base_name
     self.mean_name = "%s_mean" % base_name
     self.var_name = "%s_var" % base_name
     start_block = memory.startup_program.global_block()
     main_block = memory.main_program.current_block()
     self.scale = start_block.create_parameter(
         name=self.scale_name,
         shape=[channel],
         dtype='float32',
         default_initializer=Constant(1.0))
     self.bias = start_block.create_parameter(name=self.bias_name,
                                              shape=[channel],
                                              dtype='float32')
     self.mean = start_block.create_parameter(name=self.mean_name,
                                              initializer=Constant(0.0),
                                              trainable=False,
                                              do_model_average=False,
                                              shape=[channel],
                                              dtype='float32')
     self.mean.stop_gradient = True
     self.variance = start_block.create_parameter(name=self.var_name,
                                                  initializer=Constant(1.0),
                                                  trainable=False,
                                                  do_model_average=False,
                                                  shape=[channel],
                                                  dtype='float32')
     self.main_scale = main_block.create_parameter(name=self.scale_name,
                                                   shape=[channel],
                                                   dtype='float32')
     self.main_bias = main_block.create_parameter(name=self.bias_name,
                                                  shape=[channel],
                                                  dtype='float32')
     self.main_mean = main_block.create_parameter(name=self.mean_name,
                                                  do_model_average=False,
                                                  shape=[channel],
                                                  dtype='float32')
     self.main_mean.stop_gradient = True
     self.main_variance = main_block.create_parameter(
         name=self.var_name,
         do_model_average=False,
         shape=[channel],
         dtype='float32')
     self.main_variance.stop_gradient = True
     self.memory.add_weight(self.variance)
     self.memory.add_weight(self.mean)
     self.memory.add_weight(self.scale)
     self.memory.add_weight(self.bias)
Example #8
def DilConv(input,
            C_in,
            C_out,
            kernel_size,
            stride,
            padding,
            dilation,
            name='',
            affine=True):
    relu_a = fluid.layers.relu(input)
    conv2d_a = fluid.layers.conv2d(relu_a,
                                   C_in,
                                   kernel_size,
                                   stride,
                                   padding,
                                   dilation,
                                   groups=C_in,
                                   param_attr=ParamAttr(
                                       initializer=Xavier(uniform=False,
                                                          fan_in=0),
                                       name=name + 'op.1.weight'),
                                   bias_attr=False,
                                   use_cudnn=False)
    conv2d_b = fluid.layers.conv2d(conv2d_a,
                                   C_out,
                                   1,
                                   param_attr=ParamAttr(
                                       initializer=Xavier(uniform=False,
                                                          fan_in=0),
                                       name=name + 'op.2.weight'),
                                   bias_attr=False)
    if affine:
        dilconv_out = fluid.layers.batch_norm(
            conv2d_b,
            param_attr=ParamAttr(initializer=Constant(1.),
                                 name=name + 'op.3.weight'),
            bias_attr=ParamAttr(initializer=Constant(0.),
                                name=name + 'op.3.bias'),
            moving_mean_name=name + 'op.3.running_mean',
            moving_variance_name=name + 'op.3.running_var')
    else:
        dilconv_out = fluid.layers.batch_norm(
            conv2d_b,
            param_attr=ParamAttr(initializer=Constant(1.),
                                 learning_rate=0.,
                                 name=name + 'op.3.weight'),
            bias_attr=ParamAttr(initializer=Constant(0.),
                                learning_rate=0.,
                                name=name + 'op.3.bias'),
            moving_mean_name=name + 'op.3.running_mean',
            moving_variance_name=name + 'op.3.running_var')
    return dilconv_out
Example #9
    def _insert_quant_moving_average_abs_max_op(self, block, idx, var,
                                                quant_bits):
        """Insert fake_quantize_moving_average_abs_max
        """
        quant_var = block.create_var(name=_quantized_var_name(var.name),
                                     type=var.type,
                                     shape=var.shape,
                                     dtype=var.dtype)
        state = self.helper.create_global_variable(
            name=unique_name.generate('state'),
            persistable=True,
            dtype=var.dtype,
            shape=[1])
        self.helper.set_variable_initializer(state,
                                             initializer=Constant(value=1))
        accum = self.helper.create_global_variable(
            name=unique_name.generate('accum'),
            persistable=True,
            dtype=var.dtype,
            shape=[1])
        self.helper.set_variable_initializer(accum,
                                             initializer=Constant(value=1))
        scale = self.helper.create_parameter(attr=ParamAttr(
            name=_quantized_scale_name(var.name),
            initializer=Constant(0.001),
            trainable=False),
                                             shape=[1],
                                             dtype=var.dtype)
        scale.stop_gradient = True

        ins = {'X': var, 'InScale': scale}
        outs = {'Out': quant_var, 'OutScale': scale}
        if not self.is_test:
            ins['InState'] = state
            ins['InAccum'] = accum
            outs['OutState'] = state
            outs['OutAccum'] = accum

        attrs = {
            'bit_length': quant_bits,
            'moving_rate': self.moving_rate,
            'is_test': self.is_test
        }

        quant_op = block._insert_op(
            idx,
            type='fake_quantize_moving_average_abs_max',
            attrs=attrs,
            inputs=ins,
            outputs=outs)

        return quant_var, scale
Example #10
 def _conv_offset(self, input, filter_size, stride, padding, act=None, name=None):
     out_channel = filter_size * filter_size * 3
     out = fluid.layers.conv2d(
         input,
         num_filters=out_channel,
         filter_size=filter_size,
         stride=stride,
         padding=padding,
         param_attr=ParamAttr(initializer=Constant(0.0), name=name + ".w_0"),
         bias_attr=ParamAttr(initializer=Constant(0.0), name=name + ".b_0"),
         act=act,
         name=name)
     return out
Example #11
def StemConv1(input, C_out):
    relu_a = fluid.layers.relu(input)
    conv_a = fluid.layers.conv2d(
        relu_a, C_out, 3, stride=2, padding=1, bias_attr=False)
    bn_a = fluid.layers.batch_norm(
        conv_a,
        param_attr=ParamAttr(
            initializer=Constant(1.), name='stem1.1.weight'),
        bias_attr=ParamAttr(
            initializer=Constant(0.), name='stem1.1.bias'),
        moving_mean_name='stem1.1.running_mean',
        moving_variance_name='stem1.1.running_var')
    return bn_a
Example #12
def FactorizedReduce(input, C_out, name='', affine=True):
    relu_a = fluid.layers.relu(input)
    conv2d_a = fluid.layers.conv2d(
        relu_a,
        C_out // 2,
        1,
        2,
        param_attr=ParamAttr(
            initializer=Xavier(
                uniform=False, fan_in=0),
            name=name + 'conv_1.weight'),
        bias_attr=False)
    h_end = relu_a.shape[2]
    w_end = relu_a.shape[3]
    slice_a = fluid.layers.slice(relu_a, [2, 3], [1, 1], [h_end, w_end])
    conv2d_b = fluid.layers.conv2d(
        slice_a,
        C_out // 2,
        1,
        2,
        param_attr=ParamAttr(
            initializer=Xavier(
                uniform=False, fan_in=0),
            name=name + 'conv_2.weight'),
        bias_attr=False)
    out = fluid.layers.concat([conv2d_a, conv2d_b], axis=1)
    if affine:
        out = fluid.layers.batch_norm(
            out,
            param_attr=ParamAttr(
                initializer=Constant(1.), name=name + 'bn.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.), name=name + 'bn.bias'),
            moving_mean_name=name + 'bn.running_mean',
            moving_variance_name=name + 'bn.running_var')
    else:
        out = fluid.layers.batch_norm(
            out,
            param_attr=ParamAttr(
                initializer=Constant(1.),
                learning_rate=0.,
                name=name + 'bn.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.),
                learning_rate=0.,
                name=name + 'bn.bias'),
            moving_mean_name=name + 'bn.running_mean',
            moving_variance_name=name + 'bn.running_var')
    return out
Example #13
    def rfp_weight(self, feat, name=''):
        add_weight = fluid.layers.conv2d(
            feat,
            1,
            filter_size=1,
            stride=1,
            padding=0,
            param_attr=ParamAttr(initializer=Constant(0),
                                 name=name + 'rfp_weight.w'),
            bias_attr=ParamAttr(initializer=Constant(0),
                                name=name + 'rfp_weight.b'),
            name=name + 'rfp_weight')
        add_weight = fluid.layers.sigmoid(add_weight)

        return add_weight
Example #14
def AuxiliaryHeadImageNet(input, num_classes, aux_name='auxiliary_head'):
    relu_a = fluid.layers.relu(input)
    pool_a = fluid.layers.pool2d(relu_a, 5, 'avg', 2)
    conv2d_a = fluid.layers.conv2d(
        pool_a,
        128,
        1,
        name=aux_name + '.features.2',
        bias_attr=False)
    bn_a_name = aux_name + '.features.3'
    bn_a = fluid.layers.batch_norm(
        conv2d_a,
        act='relu',
        name=bn_a_name,
        param_attr=ParamAttr(
            initializer=Constant(1.), name=bn_a_name + '.weight'),
        bias_attr=ParamAttr(
            initializer=Constant(0.), name=bn_a_name + '.bias'),
        moving_mean_name=bn_a_name + '.running_mean',
        moving_variance_name=bn_a_name + '.running_var')
    conv2d_b = fluid.layers.conv2d(
        bn_a,
        768,
        2,
        name=aux_name + '.features.5',
        bias_attr=False)
    bn_b_name = aux_name + '.features.6'
    bn_b = fluid.layers.batch_norm(
        conv2d_b,
        act='relu',
        name=bn_b_name,
        param_attr=ParamAttr(
            initializer=Constant(1.), name=bn_b_name + '.weight'),
        bias_attr=ParamAttr(
            initializer=Constant(0.), name=bn_b_name + '.bias'),
        moving_mean_name=bn_b_name + '.running_mean',
        moving_variance_name=bn_b_name + '.running_var')
    pool_b = fluid.layers.adaptive_pool2d(bn_b, (1, 1), "avg") 
    fc_name = aux_name + '.classifier'
    fc = fluid.layers.fc(pool_b,
                         num_classes,
                         name=fc_name,
                         param_attr=ParamAttr(
                             initializer=Normal(scale=1e-3),
                             name=fc_name + '.weight'),
                         bias_attr=ParamAttr(
                             initializer=Constant(0.), name=fc_name + '.bias'))
    return fc
Example #15
    def _init_layers(self):
        self.cls_convs = paddle.nn.LayerList()   # conv layers shared across all FPN output feature maps, used to predict categories
        self.krn_convs = paddle.nn.LayerList()   # conv layers shared across all FPN output feature maps, used to predict the convolution kernels

        # conv layers shared by every FPN output feature map.
        for lvl in range(0, self.num_convs):
            use_dcn = lvl in self.dcn_v2_stages

            # use GroupNorm with 32 groups, followed by a ReLU activation
            in_ch = self.in_channels if lvl == 0 else self.seg_feat_channels
            cls_conv_layer = Conv2dUnit(in_ch, self.seg_feat_channels, 3, stride=1, bias_attr=False,
                                        norm_type='gn', norm_groups=32, act='relu', use_dcn=use_dcn,
                                        name='head.cls_convs.%d' % (lvl, ))
            self.cls_convs.append(cls_conv_layer)

            in_ch = self.in_channels + 2 if lvl == 0 else self.seg_feat_channels
            krn_conv_layer = Conv2dUnit(in_ch, self.seg_feat_channels, 3, stride=1, bias_attr=False,
                                        norm_type='gn', norm_groups=32, act='relu', use_dcn=use_dcn,
                                        name='head.krn_convs.%d' % (lvl, ))
            self.krn_convs.append(krn_conv_layer)

        if self.drop_block:
            drop_block1 = DropBlock(block_size=3, keep_prob=0.9, is_test=False)
            drop_block2 = DropBlock(block_size=3, keep_prob=0.9, is_test=False)
            self.cls_convs.append(drop_block1)
            self.krn_convs.append(drop_block2)

        # Final conv of the category branch. The bias is initialized so that the initial predicted
        # probability of every class equals self.prior_prob (derived for a sigmoid() activation,
        # the same trick as in RetinaNet).
        bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        cls_last_conv_layer = Conv2dUnit(self.seg_feat_channels, self.cate_out_channels, 3, stride=1,
                                         bias_attr=True, act=None, bias_init=Constant(bias_init_value),
                                         name='head.cls_convs.%d' % (self.num_convs, ))
        # Final conv of the kernel branch
        krn_last_conv_layer = Conv2dUnit(self.seg_feat_channels, self.kernel_out_channels, 3, stride=1,
                                         bias_attr=True, act=None,
                                         name='head.krn_convs.%d' % (self.num_convs, ))
        self.cls_convs.append(cls_last_conv_layer)
        self.krn_convs.append(krn_last_conv_layer)
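
A quick sanity check of the bias initialization used above: with a hypothetical prior_prob of 0.01, sigmoid(-log((1 - p) / p)) returns exactly p, so every class starts out with a predicted probability of prior_prob:

import math
p = 0.01                        # stand-in for self.prior_prob
b = -math.log((1 - p) / p)      # same formula as bias_init_value above
assert abs(1.0 / (1.0 + math.exp(-b)) - p) < 1e-9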
Example #16
 def __call__(self, inputs, name=''):
     x = fluid.layers.swish(inputs)
     # depthwise
     x = fluid.layers.conv2d(x,
                             self.num_chan,
                             filter_size=3,
                             padding='SAME',
                             groups=self.num_chan,
                             param_attr=ParamAttr(initializer=Xavier(),
                                                  name=name + '_dw_w'),
                             bias_attr=False)
     # pointwise
     x = fluid.layers.conv2d(x,
                             self.num_chan,
                             filter_size=1,
                             param_attr=ParamAttr(initializer=Xavier(),
                                                  name=name + '_pw_w'),
                             bias_attr=ParamAttr(regularizer=L2Decay(0.),
                                                 name=name + '_pw_b'))
     # bn + act
     x = fluid.layers.batch_norm(
         x,
         momentum=0.997,
         epsilon=1e-04,
         param_attr=ParamAttr(initializer=Constant(1.0),
                              regularizer=L2Decay(0.),
                              name=name + '_bn_w'),
         bias_attr=ParamAttr(regularizer=L2Decay(0.), name=name + '_bn_b'))
     return x
Example #17
    def __call__(self, x, name=''):
        avg_x = fluid.layers.adaptive_pool2d(x,
                                             1,
                                             pool_type="avg",
                                             name=name + 'aspp_ada_gap')
        outs = []
        for idx in range(self.aspp_num):
            inp = avg_x if (idx == self.aspp_num - 1) else x
            out = fluid.layers.conv2d(
                inp,
                self.out_channels,
                filter_size=self.kernel_sizes[idx],
                stride=1,
                padding=self.paddinds[idx],
                dilation=self.dilations[idx],
                param_attr=ParamAttr(name=name + 'aspp_conv{}.w'.format(idx)),
                bias_attr=ParamAttr(initializer=Constant(0),
                                    name=name + 'aspp_conv{}.b'.format(idx)),
                act='relu',
                name=name + 'aspp_conv{}'.format(idx))
            outs.append(out)
        outs[-1] = fluid.layers.expand(
            outs[-1], [1, 1, outs[0].shape[2], outs[0].shape[3]])
        out = fluid.layers.concat(outs, axis=1)

        return out
Example #18
    def __init__(self, pos_score, neg_score):
        """ """
        kwargs = locals()
        del kwargs['self']

        helper = LayerHelper("PaddleRec_PosNegRatio", **kwargs)
        if "pos_score" not in kwargs or "neg_score" not in kwargs:
            raise ValueError(
                "PosNegRatio expect pos_score and neg_score as inputs.")
        pos_score = kwargs.get('pos_score')
        neg_score = kwargs.get('neg_score')

        if not isinstance(pos_score, Variable):
            raise ValueError("pos_score must be Variable, but received %s" %
                             type(pos_score))
        if not isinstance(neg_score, Variable):
            raise ValueError("neg_score must be Variable, but received %s" %
                             type(neg_score))

        wrong = fluid.layers.cast(fluid.layers.less_equal(
            pos_score, neg_score),
                                  dtype='float32')
        wrong_cnt = fluid.layers.reduce_sum(wrong)
        right = fluid.layers.cast(fluid.layers.less_than(neg_score, pos_score),
                                  dtype='float32')
        right_cnt = fluid.layers.reduce_sum(right)

        global_right_cnt, _ = helper.create_or_get_global_variable(
            name="right_cnt", persistable=True, dtype='float32', shape=[1])
        global_wrong_cnt, _ = helper.create_or_get_global_variable(
            name="wrong_cnt", persistable=True, dtype='float32', shape=[1])

        for var in [global_right_cnt, global_wrong_cnt]:
            helper.set_variable_initializer(
                var, Constant(value=0.0, force_cpu=True))

        helper.append_op(type="elementwise_add",
                         inputs={
                             "X": [global_right_cnt],
                             "Y": [right_cnt]
                         },
                         outputs={"Out": [global_right_cnt]})
        helper.append_op(type="elementwise_add",
                         inputs={
                             "X": [global_wrong_cnt],
                             "Y": [wrong_cnt]
                         },
                         outputs={"Out": [global_wrong_cnt]})
        self.pn = (global_right_cnt + 1.0) / (global_wrong_cnt + 1.0)

        self._global_metric_state_vars = dict()
        self._global_metric_state_vars['right_cnt'] = (global_right_cnt.name,
                                                       "float32")
        self._global_metric_state_vars['wrong_cnt'] = (global_wrong_cnt.name,
                                                       "float32")

        self.metrics = dict()
        self.metrics['WrongCnt'] = global_wrong_cnt
        self.metrics['RightCnt'] = global_right_cnt
        self.metrics['PN'] = self.pn
Example #19
 def __init__(self,
              name=None,
              channel_num=None,
              quant_bits=8,
              quant_axis=0,
              dtype='float32',
              quant_on_weight=False):
     assert quant_on_weight == True, "Channel_wise only can be used on weight quantization."
     super(FakeQuantChannelWiseAbsMax, self).__init__()
     self._quant_bits = quant_bits
     self._quant_axis = quant_axis
     self._dtype = dtype
     self._name = name
     self._channel_num = channel_num
     scale_prefix = "{}.scale".format(
         name) if name else 'quant_dequant.scale'
     self._scale_name = unique_name.generate(scale_prefix)
     if quant_on_weight:
         scale_attr = ParamAttr(name=self._scale_name,
                                initializer=Constant(0.0),
                                trainable=False)
         self._scale = self.create_parameter(shape=[self._channel_num],
                                             attr=scale_attr,
                                             dtype=self._dtype)
         self._scale.stop_gradient = True
     else:
         self._scale = None
Example #20
 def __init__(self, num_features, eps=1e-5):
     super(adaILN, self).__init__()
     self.eps = eps
     self.rho = self.create_parameter(attr=True,
                                      shape=(1, num_features, 1, 1),
                                      dtype='float32',
                                      default_initializer=Constant(0.9),
                                      is_bias=False)
     self.num_features = num_features
Example #21
    def __init__(self, input, label, k=20):
        """ """
        kwargs = locals()
        del kwargs['self']
        self.k = k

        if not isinstance(input, Variable):
            raise ValueError("input must be Variable, but received %s" %
                             type(input))
        if not isinstance(label, Variable):
            raise ValueError("label must be Variable, but received %s" %
                             type(label))

        helper = LayerHelper("PaddleRec_RecallK", **kwargs)
        batch_accuracy = accuracy(input, label, self.k)
        global_ins_cnt, _ = helper.create_or_get_global_variable(
            name="ins_cnt", persistable=True, dtype='float32', shape=[1])
        global_pos_cnt, _ = helper.create_or_get_global_variable(
            name="pos_cnt", persistable=True, dtype='float32', shape=[1])

        for var in [global_ins_cnt, global_pos_cnt]:
            helper.set_variable_initializer(
                var, Constant(value=0.0, force_cpu=True))

        tmp_ones = fluid.layers.fill_constant(shape=fluid.layers.shape(label),
                                              dtype="float32",
                                              value=1.0)
        batch_ins = fluid.layers.reduce_sum(tmp_ones)
        batch_pos = batch_ins * batch_accuracy

        helper.append_op(type="elementwise_add",
                         inputs={
                             "X": [global_ins_cnt],
                             "Y": [batch_ins]
                         },
                         outputs={"Out": [global_ins_cnt]})

        helper.append_op(type="elementwise_add",
                         inputs={
                             "X": [global_pos_cnt],
                             "Y": [batch_pos]
                         },
                         outputs={"Out": [global_pos_cnt]})

        self.acc = global_pos_cnt / global_ins_cnt

        self._global_metric_state_vars = dict()
        self._global_metric_state_vars['ins_cnt'] = (global_ins_cnt.name,
                                                     "float32")
        self._global_metric_state_vars['pos_cnt'] = (global_pos_cnt.name,
                                                     "float32")

        metric_name = "Acc(Recall@%d)" % self.k
        self.metrics = dict()
        self.metrics["InsCnt"] = global_ins_cnt
        self.metrics["RecallCnt"] = global_pos_cnt
        self.metrics[metric_name] = self.acc
Example #22
 def _create_param(in_layer, first_name, last_name, dtype):
     prefix = '{}.{}'.format(first_name, last_name) \
         if first_name else 'outscale.{}'.format(last_name)
     attr = ParamAttr(name=unique_name.generate(prefix),
                      initializer=Constant(1),
                      trainable=False)
     param = in_layer.create_parameter(shape=[1],
                                       attr=attr,
                                       dtype=dtype)
     return param
Example #23
    def __init__(self,
                 name_scope,
                 ch_out,
                 filter_size,
                 stride,
                 padding,
                 learning_rate=1.0,
                 act='relu'):
        super(conv_affine_layer, self).__init__(name_scope)
        
        self._conv = Conv2D(
            name_scope,
            num_filters=ch_out,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            act=None,
            param_attr=ParamAttr(
                name=name_scope + "_weights",
                learning_rate=learning_rate),
            bias_attr=False)
        if name_scope == "conv1":
            bn_name = "bn_" + name_scope
        else:
            bn_name = "bn" + name_scope[3:]
        self.name_scope = name_scope

        self.scale = fluid.layers.create_parameter(
            shape=[ch_out],
            dtype='float32',
            attr=ParamAttr(
                name=bn_name + '_scale', learning_rate=0.),
            default_initializer=Constant(1.))
        #self.scale._stop_gradient = True
        self.bias = fluid.layers.create_parameter(
            shape=[ch_out],
            dtype='float32',
            attr=ParamAttr(
                bn_name + '_offset', learning_rate=0.),
            default_initializer=Constant(0.))
        #self.bias._stop_gradient = True
        
        self.act = act
Example #24
    def _insert_quant_range_abs_max_op(self, block, idx, var, quant_bits):
        """Insert fake_quantize_range_abs_max
        """
        quant_var = block.create_var(name=_quantized_var_name(var.name),
                                     type=var.type,
                                     shape=var.shape,
                                     dtype=var.dtype)
        scale = self.helper.create_parameter(attr=ParamAttr(
            name=_quantized_scale_name(var.name),
            initializer=Constant(0.001),
            trainable=False),
                                             shape=[1],
                                             dtype=var.dtype)
        scale.stop_gradient = True

        ins = {'X': var, 'InScale': scale}
        outs = {'Out': quant_var, 'OutScale': scale}
        if not self.is_test:
            # A global step counter variable with type int64
            scales = self.helper.create_global_variable(
                name=unique_name.generate('scales'),
                persistable=True,
                dtype=var.dtype,
                shape=[self.window_size])
            self.helper.set_variable_initializer(scales,
                                                 initializer=Constant(value=0))

            ins['Iter'] = self.global_step
            outs['OutScales'] = scales

        attrs = {
            'window_size': self.window_size,
            'bit_length': quant_bits,
            'is_test': self.is_test
        }

        quant_op = block._insert_op(idx,
                                    type='fake_quantize_range_abs_max',
                                    attrs=attrs,
                                    inputs=ins,
                                    outputs=outs)

        return quant_var, scale
Example #25
 def _head(self,
           x,
           out_c,
           conv_num=1,
           head_out_c=None,
           name=None,
           is_test=False):
     head_out_c = self.head_conv if not head_out_c else head_out_c
     conv_w_std = 0.01 if '.hm' in name else 0.001
     conv_w_init = Normal(0, conv_w_std)
     for i in range(conv_num):
         conv_name = '{}.{}.conv'.format(name, i)
         if self.dcn_head:
             x = DeformConv(
                 x,
                 head_out_c,
                 3,
                 initializer=conv_w_init,
                 name=conv_name + '.dcn')
             x = fluid.layers.relu(x)
         else:
             x = fluid.layers.conv2d(
                 x,
                 head_out_c,
                 3,
                 padding=1,
                 param_attr=ParamAttr(
                     initializer=conv_w_init, name=conv_name + '.weight'),
                 bias_attr=ParamAttr(
                     learning_rate=2.,
                     regularizer=L2Decay(0.),
                     name=conv_name + '.bias'),
                 act='relu')
     if self.drop_block and '.hm' in name:
         x = DropBlock(
             x,
             block_size=self.block_size,
             keep_prob=self.keep_prob,
             is_test=is_test)
     bias_init = float(-np.log((1 - 0.01) / 0.01)) if '.hm' in name else 0.
     conv_b_init = Constant(bias_init)
     x = fluid.layers.conv2d(
         x,
         out_c,
         1,
         param_attr=ParamAttr(
             initializer=conv_w_init,
             name='{}.{}.weight'.format(name, conv_num)),
         bias_attr=ParamAttr(
             learning_rate=2.,
             regularizer=L2Decay(0.),
             name='{}.{}.bias'.format(name, conv_num),
             initializer=conv_b_init))
     return x
Example #26
def conv_affine_layer(input,
                      ch_out,
                      filter_size,
                      stride,
                      padding,
                      act='relu',
                      name=None):
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=ch_out,
        filter_size=filter_size,
        stride=stride,
        padding=padding,
        act=None,
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=False,
        name=name + '.conv2d.output.1')
    if name == "conv1":
        bn_name = "bn_" + name
    else:
        bn_name = "bn" + name[3:]

    scale = fluid.layers.create_parameter(
        shape=[conv.shape[1]],
        dtype=conv.dtype,
        attr=ParamAttr(
            name=bn_name + '_scale', learning_rate=0.),
        default_initializer=Constant(1.))
    scale.stop_gradient = True
    bias = fluid.layers.create_parameter(
        shape=[conv.shape[1]],
        dtype=conv.dtype,
        attr=ParamAttr(
            bn_name + '_offset', learning_rate=0.),
        default_initializer=Constant(0.))
    bias.stop_gradient = True

    out = fluid.layers.affine_channel(x=conv, scale=scale, bias=bias)
    if act == 'relu':
        out = fluid.layers.relu(x=out)
    return out
Example #27
def SevenConv(input, C_out, stride, name='', affine=True):
    relu_a = fluid.layers.relu(input)
    conv2d_a = fluid.layers.conv2d(
        relu_a,
        C_out, (1, 7), (1, stride), (0, 3),
        param_attr=ParamAttr(
            initializer=Xavier(
                uniform=False, fan_in=0),
            name=name + 'op.1.weight'),
        bias_attr=False)
    conv2d_b = fluid.layers.conv2d(
        conv2d_a,
        C_out, (7, 1), (stride, 1), (3, 0),
        param_attr=ParamAttr(
            initializer=Xavier(
                uniform=False, fan_in=0),
            name=name + 'op.2.weight'),
        bias_attr=False)
    if affine:
        out = fluid.layers.batch_norm(
            conv2d_b,
            param_attr=ParamAttr(
                initializer=Constant(1.), name=name + 'op.3.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.), name=name + 'op.3.bias'),
            moving_mean_name=name + 'op.3.running_mean',
            moving_variance_name=name + 'op.3.running_var')
    else:
        out = fluid.layers.batch_norm(
            conv2d_b,
            param_attr=ParamAttr(
                initializer=Constant(1.),
                learning_rate=0.,
                name=name + 'op.3.weight'),
            bias_attr=ParamAttr(
                initializer=Constant(0.),
                learning_rate=0.,
                name=name + 'op.3.bias'),
            moving_mean_name=name + 'op.3.running_mean',
            moving_variance_name=name + 'op.3.running_var')
    return out
Example #28
    def __init__(self, name=None, moving_rate=0.9, dtype='float32'):
        r"""
        MovingAverageAbsMaxScale layer is used to calculate the output quantization scale of a Layer.
        Its computational formula is described as below:

        :math:`scale = (moving\_rate*accum+max(abs(x)))/(moving\_rate*state+1)`
        :math:`Out = X`
        """
        super(MovingAverageAbsMaxScale, self).__init__()
        self._moving_rate = moving_rate
        self._dtype = dtype

        scale_prefix = '{}.scale'.format(name) if name else 'outscale.scale'
        name = unique_name.generate(scale_prefix)
        scale_attr = ParamAttr(name=name,
                               initializer=Constant(1),
                               trainable=False)
        self._scale = self.create_parameter(shape=[1],
                                            attr=scale_attr,
                                            dtype=self._dtype)
        self._scale.stop_gradient = True

        state_prefix = "{}.state".format(name) if name else 'outscale.state'
        state_attr = ParamAttr(name=unique_name.generate(state_prefix),
                               initializer=Constant(1),
                               trainable=False)
        self._state = self.create_parameter(shape=[1],
                                            attr=state_attr,
                                            dtype=self._dtype)
        self._state.stop_gradient = True

        accum_prefix = "{}.accum".format(name) if name else 'outscale.accum'
        accum_attr = ParamAttr(name=unique_name.generate(accum_prefix),
                               initializer=Constant(1),
                               trainable=False)
        self._accum = self.create_parameter(shape=[1],
                                            attr=accum_attr,
                                            dtype=self._dtype)
        self._accum.stop_gradient = True
        MovingAverageAbsMaxScale._has_create = True
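
A small numeric illustration of the scale formula quoted in the docstring above (hypothetical values; this only reproduces the arithmetic, not the layer's actual forward pass):

moving_rate, accum, state = 0.9, 1.0, 1.0   # the accum and state parameters start at 1
abs_max_x = 2.5                             # max(abs(x)) for a made-up input batch
scale = (moving_rate * accum + abs_max_x) / (moving_rate * state + 1)   # (0.9 + 2.5) / 1.9 ≈ 1.789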
Example #29
 def test_prelu(self):
     program = Program()
     with program_guard(program):
         input = layers.data(name="input",
                             shape=[5, 200, 100, 100],
                             dtype="float32")
         mode = 'channel'
         out = layers.prelu(input,
                            mode,
                            param_attr=ParamAttr(initializer=Constant(1.0)),
                            name='prelu')
         self.assertIsNotNone(out)
     print(str(program))
Example #30
 def pred_mod(self, x, dim, name=None):
     conv0 = _conv_norm(
         x, 1, 256, with_bn=False, bn_act='relu', name=name + '_0')
     conv1 = fluid.layers.conv2d(
         input=conv0,
         filter_size=1,
         num_filters=dim,
         param_attr=ParamAttr(
             name=name + "_1_weight", initializer=kaiming_init(conv0, 1)),
         bias_attr=ParamAttr(
             name=name + "_1_bias", initializer=Constant(-2.19)),
         name=name + '_1')
     return conv1