Code example #1
    def __init__(self,
                 num_channels: int,
                 num_filters: int,
                 reduction_ratio: int,
                 name: str = None):
        super(SELayer, self).__init__()

        self.pool2d_gap = nn.AdaptiveAvgPool2D(1)

        self._num_channels = num_channels

        med_ch = int(num_channels / reduction_ratio)
        stdv = 1.0 / math.sqrt(num_channels * 1.0)
        self.squeeze = nn.Linear(
            num_channels,
            med_ch,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(-stdv, stdv)))

        stdv = 1.0 / math.sqrt(med_ch * 1.0)
        self.excitation = nn.Linear(
            med_ch,
            num_filters,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(-stdv, stdv)))
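Note: a minimal, self-contained sketch of the ParamAttr pattern the example above uses (the dimensions here are illustrative, not from the original):

import math
import paddle
import paddle.nn as nn

num_channels, reduction_ratio = 64, 16
med_ch = num_channels // reduction_ratio
stdv = 1.0 / math.sqrt(num_channels)

# ParamAttr carries the initializer; nn.Linear applies it to its weight.
squeeze = nn.Linear(
    num_channels, med_ch,
    weight_attr=paddle.ParamAttr(initializer=nn.initializer.Uniform(-stdv, stdv)))
print(squeeze.weight.shape)  # [64, 4]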
Code example #2
    def __init__(self,
                 input_units,
                 output_units,
                 iters=3,
                 maxlen=32,
                 k_max=3,
                 init_std=1.0,
                 batch_size=None):
        super(Mind_Capsual_Layer, self).__init__()

        self.iters = iters
        self.input_units = input_units
        self.output_units = output_units
        self.maxlen = maxlen
        self.init_std = init_std
        self.k_max = k_max
        self.batch_size = batch_size

        # B2I routing
        self.routing_logits = self.create_parameter(
            shape=[1, self.k_max, self.maxlen],
            attr=paddle.ParamAttr(name="routing_logits", trainable=False),
            default_initializer=nn.initializer.Normal(mean=0.0,
                                                      std=self.init_std))

        # bilinear mapping
        self.bilinear_mapping_matrix = self.create_parameter(
            shape=[self.input_units, self.output_units],
            attr=paddle.ParamAttr(name="bilinear_mapping_matrix",
                                  trainable=True),
            default_initializer=nn.initializer.Normal(mean=0.0,
                                                      std=self.init_std))
Code example #3
    def __init__(self,
                 head,
                 in_channel,
                 roi_extractor=RoIAlign().__dict__,
                 bbox_assigner='BboxAssigner',
                 with_pool=False,
                 num_classes=80,
                 bbox_weight=[10., 10., 5., 5.]):
        super(BBoxHead, self).__init__()
        self.head = head
        self.roi_extractor = roi_extractor
        if isinstance(roi_extractor, dict):
            self.roi_extractor = RoIAlign(**roi_extractor)
        self.bbox_assigner = bbox_assigner

        self.with_pool = with_pool
        self.num_classes = num_classes
        self.bbox_weight = bbox_weight

        lr_factor = 1.
        self.bbox_score = nn.Linear(
            in_channel,
            self.num_classes + 1,
            weight_attr=paddle.ParamAttr(
                initializer=Normal(mean=0.0, std=0.01)))

        self.bbox_delta = nn.Linear(
            in_channel,
            4 * self.num_classes,
            weight_attr=paddle.ParamAttr(
                initializer=Normal(mean=0.0, std=0.001)))
        self.assigned_label = None
        self.assigned_rois = None
Code example #4
def linear_init(input_size, hidden_size, with_bias=True, init_type='gcn'):
    if init_type == 'gcn':
        fc_w_attr = paddle.ParamAttr(initializer=nn.initializer.XavierNormal())
        fc_bias_attr = paddle.ParamAttr(
            initializer=nn.initializer.Constant(0.0))
    else:
        fan_in = input_size
        bias_bound = 1.0 / math.sqrt(fan_in)
        fc_bias_attr = paddle.ParamAttr(initializer=nn.initializer.Uniform(
            low=-bias_bound, high=bias_bound))

        negative_slope = math.sqrt(5)
        gain = math.sqrt(2.0 / (1 + negative_slope**2))
        std = gain / math.sqrt(fan_in)
        weight_bound = math.sqrt(3.0) * std
        fc_w_attr = paddle.ParamAttr(initializer=nn.initializer.Uniform(
            low=-weight_bound, high=weight_bound))

    if not with_bias:
        fc_bias_attr = False

    return nn.Linear(input_size,
                     hidden_size,
                     weight_attr=fc_w_attr,
                     bias_attr=fc_bias_attr)
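A hedged usage sketch for linear_init (assumes import math, import paddle, and import paddle.nn as nn, which the function itself relies on):

fc_gcn = linear_init(16, 32, with_bias=True, init_type='gcn')  # Xavier-normal weights, zero bias
fc_kaiming = linear_init(16, 32, init_type='kaiming')  # any non-'gcn' value takes the uniform branch

x = paddle.randn([4, 16])
print(fc_gcn(x).shape)  # [4, 32]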
Code example #5
 def __init__(self,
              in_channels,
              num_filters,
              filter_size,
              stride=1,
              padding=0,
              groups=1,
              act='relu',
              name=None):
     super(ConvBNLayer, self).__init__()
     self.conv = Conv2D(num_channels=in_channels,
                        num_filters=num_filters,
                        filter_size=filter_size,
                        stride=stride,
                        padding=padding,
                        groups=groups,
                        act=None,
                        param_attr=paddle.ParamAttr(name=name +
                                                    ".conv.weight"),
                        bias_attr=False)
     self.bn = BatchNorm(num_filters,
                         act=act,
                         epsilon=0.001,
                         param_attr=paddle.ParamAttr(name=name +
                                                     ".bn.weight"),
                         bias_attr=paddle.ParamAttr(name=name + ".bn.bias"),
                         moving_mean_name=name + '.bn.running_mean',
                         moving_variance_name=name + '.bn.running_var')
Code example #6
 def __init__(self):
     super(ModelLinear2, self).__init__()
     with supernet(expand_ratio=None) as ofa_super:
         models = []
         models += [
             nn.Embedding(
                 num_embeddings=64,
                 embedding_dim=64,
                 weight_attr=paddle.ParamAttr(name='emb'))
         ]
         models += [
             nn.Linear(
                 64,
                 128,
                 weight_attr=paddle.ParamAttr(name='fc1_w'),
                 bias_attr=paddle.ParamAttr(name='fc1_b'))
         ]
         models += [
             nn.LayerNorm(
                 128,
                 weight_attr=paddle.ParamAttr(name='ln1_w'),
                 bias_attr=paddle.ParamAttr(name='ln1_b'))
         ]
         models += [nn.Linear(128, 256)]
         models = ofa_super.convert(models)
     self.models = paddle.nn.Sequential(*models)
Code example #7
File: YOLOv3.py Project: zzLoschicos/tutorials
    def __init__(self,
                 ch_in,
                 ch_out,
                 kernel_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act="leaky"):
        super(ConvBNLayer, self).__init__()

        self.conv = paddle.nn.Conv2D(
            in_channels=ch_in,
            out_channels=ch_out,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(0., 0.02)),
            bias_attr=False)

        self.batch_norm = paddle.nn.BatchNorm2D(
            num_features=ch_out,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Normal(0., 0.02),
                regularizer=paddle.regularizer.L2Decay(0.)),
            bias_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant(0.0),
                regularizer=paddle.regularizer.L2Decay(0.)))
        self.act = act
Code example #8
    def __init__(self,
                 vocab_size,
                 hidden_size=768,
                 hidden_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=16,
                 initializer_range=0.02,
                 topo=None):
        super(GPTEmbeddings, self).__init__()
        if topo is None or topo.mp_info.size == 1:
            self.word_embeddings = nn.Embedding(
                vocab_size,
                hidden_size,
                weight_attr=paddle.ParamAttr(
                    name="word_embeddings",
                    initializer=nn.initializer.Normal(
                        mean=0.0, std=initializer_range)))
        self.position_embeddings = nn.Embedding(
            max_position_embeddings,
            hidden_size,
            weight_attr=paddle.ParamAttr(
                name="pos_embeddings",
                initializer=nn.initializer.Normal(
                    mean=0.0, std=initializer_range)))

        self.dropout = nn.Dropout(hidden_dropout_prob)
Code example #9
    def __init__(self, sparse_feature_number, sparse_feature_dim, fc_sizes):
        super(DNNLayer, self).__init__()
        self.sparse_feature_number = sparse_feature_number
        self.sparse_feature_dim = sparse_feature_dim
        self.fc_sizes = fc_sizes

        self.embedding = paddle.nn.Embedding(
            self.sparse_feature_number,
            self.sparse_feature_dim,
            padding_idx=0,
            sparse=True,
            weight_attr=paddle.ParamAttr(
                name="SparseFeatFactors",
                initializer=paddle.nn.initializer.Uniform()))

        sizes = [63] + self.fc_sizes + [1]
        acts = ["relu" for _ in range(len(self.fc_sizes))] + ["sigmoid"]
        self._layers = []
        for i in range(len(self.fc_sizes) + 1):
            linear = paddle.nn.Linear(
                in_features=sizes[i],
                out_features=sizes[i + 1],
                weight_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.Normal(
                        std=1.0 / math.sqrt(sizes[i]))))
            self.add_sublayer('linear_%d' % i, linear)
            self._layers.append(linear)
            if acts[i] == 'relu':
                act = paddle.nn.ReLU()
                self.add_sublayer('act_%d' % i, act)
                self._layers.append(act)
            if acts[i] == 'sigmoid':
                act = paddle.nn.layer.Sigmoid()
                self.add_sublayer('act_%d' % i, act)
                self._layers.append(act)
Code example #10
def _run_dygraph_single(use_cuda, use_xpu, use_npu):
    """
    Testing the simple network in dygraph mode using one CPU/GPU/XPU/NPU.

    Args:
        use_cuda (bool): Whether running with CUDA.
        use_xpu (bool): Whether running with XPU.
        use_npu (bool): Whether running with NPU.
    """
    paddle.disable_static()
    if use_cuda:
        paddle.set_device('gpu')
    elif use_xpu:
        paddle.set_device('xpu')
    elif use_npu:
        paddle.set_device('npu')
    else:
        paddle.set_device('cpu')
    weight_attr = paddle.ParamAttr(
        name="weight", initializer=paddle.nn.initializer.Constant(value=0.5))
    bias_attr = paddle.ParamAttr(
        name="bias", initializer=paddle.nn.initializer.Constant(value=1.0))
    linear = paddle.nn.Linear(2,
                              4,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr)
    input_np = _prepare_data(1)
    input_tensor = paddle.to_tensor(input_np)
    linear_out = linear(input_tensor)
    out = paddle.tensor.sum(linear_out)
    out.backward()
    opt = paddle.optimizer.Adam(learning_rate=0.001,
                                parameters=linear.parameters())
    opt.step()
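A call sketch for the helper above (hedged: _prepare_data is defined elsewhere in the original test module):

# Exercise the single-device dygraph path on CPU.
_run_dygraph_single(use_cuda=False, use_xpu=False, use_npu=False)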
Code example #11
File: elmo.py Project: yuweifamily/PaddleNLP
    def __init__(self, input_dim, num_layers):
        super(Highway, self).__init__()

        self._num_layers = num_layers

        self._highway_layers = []
        for i in range(num_layers):
            paramAttr = paddle.ParamAttr(initializer=I.Normal(
                mean=0.0, std=1.0 / np.sqrt(input_dim)))
            paramAttr_b = paddle.ParamAttr(initializer=I.Constant(value=-2.0))
            carry_linear = nn.Linear(
                input_dim,
                input_dim,
                weight_attr=paramAttr,
                bias_attr=paramAttr_b)
            self.add_sublayer('carry_linear_{}'.format(i), carry_linear)

            paramAttr = paddle.ParamAttr(initializer=I.Normal(
                mean=0.0, std=1.0 / np.sqrt(input_dim)))
            transform_linear = nn.Linear(
                input_dim, input_dim, weight_attr=paramAttr)
            self.add_sublayer('transform_linear_{}'.format(i), transform_linear)

            self._highway_layers.append([carry_linear, transform_linear])

        self._relu = nn.ReLU()
        self._sigmoid = nn.Sigmoid()
Code example #12
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')
        if self.is_training:
            ch = self.feed_shape[0][1]
            conv1 = paddle.static.nn.conv2d(
                x, num_filters=ch, filter_size=3, bias_attr=False)
            scale = paddle.ParamAttr(trainable=True)
            bias = paddle.ParamAttr(trainable=True)
            out = paddle.fluid.layers.nn.layer_norm(
                conv1, param_attr=scale, bias_attr=bias, **self.attrs)
            loss = paddle.mean(out)
            self.fetch_list = [loss.name]
        else:
            scale = self.attrs['scale']
            bias = self.attrs['shift']
            out = paddle.fluid.layers.nn.layer_norm(
                x, param_attr=scale, bias_attr=bias, **self.attrs)
            self.fetch_list = [out.name]

        if self.is_training:
            optimizer = None
            if self.optimizer == 'sgd':
                optimizer = paddle.optimizer.SGD(learning_rate=1e-2)
            elif self.optimizer == 'adam':
                optimizer = paddle.optimizer.Adam(learning_rate=1e-2)
            elif self.optimizer == 'lamb':
                optimizer = paddle.optimizer.Lamb(
                    learning_rate=1e-2, lamb_weight_decay=0.0)
            if optimizer is not None:
                optimizer.minimize(loss)
Code example #13
File: network.py Project: zhong110020/models
 def __init__(self, args):
     super(DNN, self).__init__()
     self.args = args
     self.init_value_ = 0.1
     sizes = [self.args.num_field * self.args.embedding_size
              ] + self.args.layer_sizes + [1]
     acts = [self.args.act
             for _ in range(len(self.args.layer_sizes))] + [None]
     w_scales = [
         self.init_value_ / math.sqrt(float(10))
         for _ in range(len(self.args.layer_sizes))
     ] + [self.init_value_]
     self._layers = []
     for i in range(len(self.args.layer_sizes) + 1):
         linear = paddle.nn.Linear(
             in_features=sizes[i],
             out_features=sizes[i + 1],
             weight_attr=paddle.ParamAttr(
                 initializer=paddle.nn.initializer.TruncatedNormal(
                     mean=0.0, std=w_scales[i])),
             bias_attr=paddle.ParamAttr(
                 initializer=paddle.nn.initializer.TruncatedNormal(
                     mean=0.0, std=self.init_value_)))
         self.add_sublayer('linear_%d' % i, linear)
         self._layers.append(linear)
         # Register an activation only for layers that have one; otherwise a
         # stale (or undefined) `act` would be appended for the final layer.
         if acts[i] == 'relu':
             act = paddle.nn.ReLU()
             self.add_sublayer('act_%d' % i, act)
             self._layers.append(act)
Code example #14
 def __init__(self,
              ch_in,
              ch_out,
              kernel_size,
              stride=1,
              padding=0,
              dilation=1,
              groups=1,
              bias=False):
     super(ConvLayer, self).__init__()
     bias_attr = False
     fan_in = ch_in * kernel_size**2
     bound = 1 / math.sqrt(fan_in)
     param_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
     if bias:
         bias_attr = paddle.ParamAttr(initializer=Constant(0.))
     self.conv = nn.Conv2D(in_channels=ch_in,
                           out_channels=ch_out,
                           kernel_size=kernel_size,
                           stride=stride,
                           padding=padding,
                           dilation=dilation,
                           groups=groups,
                           weight_attr=param_attr,
                           bias_attr=bias_attr)
Code example #15
File: conv.py Project: wbj0110/models
    def __init__(self,
                 ernie,
                 input_size,
                 hidden_size,
                 learning_rate,
                 aggr_func='sum'):
        """ErnieSageV2: Ernie is applied to the EDGE of the text graph.

        Args:
            ernie (nn.Layer): the ernie model.
            input_size (int): input size of feature tensor.
            hidden_size (int): hidden size of the Conv layers.
            learning_rate (float): learning rate.
            aggr_func (str): aggregate function; 'sum', 'mean', 'max', 'min' available.
        """
        super(ErnieSageV2Conv, self).__init__()
        assert aggr_func in ["sum", "mean", "max", "min"], \
            "Only the 'sum', 'mean', 'max' and 'min' built-in receive functions are supported."
        self.aggr_func = "reduce_%s" % aggr_func

        self.self_linear = nn.Linear(
            input_size,
            hidden_size,
            weight_attr=paddle.ParamAttr(learning_rate=learning_rate))
        self.neigh_linear = nn.Linear(
            input_size,
            hidden_size,
            weight_attr=paddle.ParamAttr(learning_rate=learning_rate))

        self.ernie = ernie
Code example #16
    def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups,
                   dilation):
        str1x1, str3x3 = (stride, 1) if self._stride_1x1 else (1, stride)

        fan = (dim_inner) * (self.temp_kernel_size * 1 * 1)
        initializer_tmp = get_conv_init(fan)

        self.a = paddle.nn.Conv3D(
            in_channels=dim_in,
            out_channels=dim_inner,
            kernel_size=[self.temp_kernel_size, 1, 1],
            stride=[1, str1x1, str1x1],
            padding=[int(self.temp_kernel_size // 2), 0, 0],
            weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
            bias_attr=False)
        self.a_bn = self.norm_module(
            num_features=dim_inner,
            epsilon=self._eps,
            weight_attr=get_bn_param_attr(),
            bias_attr=get_bn_param_attr(bn_weight=0.0))

        # 1x3x3, BN, ReLU.
        fan = (dim_inner) * (1 * 3 * 3)
        initializer_tmp = get_conv_init(fan)

        self.b = paddle.nn.Conv3D(
            in_channels=dim_inner,
            out_channels=dim_inner,
            kernel_size=[1, 3, 3],
            stride=[1, str3x3, str3x3],
            padding=[0, dilation, dilation],
            groups=num_groups,
            dilation=[1, dilation, dilation],
            weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
            bias_attr=False)
        self.b_bn = self.norm_module(
            num_features=dim_inner,
            epsilon=self._eps,
            weight_attr=get_bn_param_attr(),
            bias_attr=get_bn_param_attr(bn_weight=0.0))

        # 1x1x1, BN.
        fan = (dim_out) * (1 * 1 * 1)
        initializer_tmp = get_conv_init(fan)

        self.c = paddle.nn.Conv3D(
            in_channels=dim_inner,
            out_channels=dim_out,
            kernel_size=[1, 1, 1],
            stride=[1, 1, 1],
            padding=[0, 0, 0],
            weight_attr=paddle.ParamAttr(initializer=initializer_tmp),
            bias_attr=False)
        self.c_bn = self.norm_module(
            num_features=dim_out,
            epsilon=self._eps,
            weight_attr=get_bn_param_attr(bn_weight=0.0),
            bias_attr=get_bn_param_attr(bn_weight=0.0))
Code example #17
def _build_ln(n_in, name):
    return nn.LayerNorm(
        normalized_shape=n_in,
        weight_attr=paddle.ParamAttr(
            name='%s_layer_norm_scale' % name if name is not None else None,
            initializer=nn.initializer.Constant(1.)),
        bias_attr=paddle.ParamAttr(
            name='%s_layer_norm_bias' % name if name is not None else None,
            # Standard LayerNorm init: unit scale, zero bias (cf. example #25).
            initializer=nn.initializer.Constant(0.)))
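A hedged usage sketch (illustrative shape; assumes import paddle and import paddle.nn as nn):

ln = _build_ln(768, name='encoder_layer_0')  # parameters named encoder_layer_0_layer_norm_scale/_bias
x = paddle.randn([2, 8, 768])
print(ln(x).shape)  # [2, 8, 768]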
Code example #18
 def ops(self):
     """
     operation
     """
     fc = FC(size=self.fc_dim,
             param_attr=paddle.ParamAttr(name="%s.w" % self.name),
             bias_attr=paddle.ParamAttr(name="%s.b" % self.name),
             act=self.act)
     return fc
Code example #19
 def __init__(self):
     super(Model, self).__init__()
     self.fc = nn.Linear(
         5,
         10,
         weight_attr=paddle.ParamAttr(
             initializer=nn.initializer.XavierNormal()),
         bias_attr=paddle.ParamAttr(
             initializer=nn.initializer.Constant(value=0.0)))
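A hedged usage sketch (assumes the snippet sits inside class Model(nn.Layer): with import paddle and import paddle.nn as nn in scope):

model = Model()
x = paddle.randn([3, 5])
print(model.fc(x).shape)  # [3, 10]
print(float(model.fc.bias.sum()))  # 0.0, from the Constant initializer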
Code example #20
    def __init__(self, sparse_feature_number, sparse_feature_dim, num_field,
                 ctr_layer_sizes, cvr_layer_sizes):
        super(ESMMLayer, self).__init__()
        self.sparse_feature_number = sparse_feature_number
        self.sparse_feature_dim = sparse_feature_dim
        self.num_field = num_field
        self.ctr_layer_sizes = ctr_layer_sizes
        self.cvr_layer_sizes = cvr_layer_sizes

        self.embedding = paddle.nn.Embedding(
            self.sparse_feature_number,
            self.sparse_feature_dim,
            sparse=True,
            padding_idx=0,
            weight_attr=paddle.ParamAttr(
                name="SparseFeatFactors",
                initializer=paddle.nn.initializer.Uniform()))

        # ctr part
        ctr_sizes = [sparse_feature_dim * num_field
                     ] + self.ctr_layer_sizes + [2]
        acts = ["relu" for _ in range(len(self.ctr_layer_sizes))] + [None]
        self._ctr_mlp_layers = []
        for i in range(len(ctr_layer_sizes) + 1):
            linear = paddle.nn.Linear(
                in_features=ctr_sizes[i],
                out_features=ctr_sizes[i + 1],
                weight_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.Normal(
                        std=1.0 / math.sqrt(ctr_sizes[i]))))
            self.add_sublayer('linear_%d' % i, linear)
            self._ctr_mlp_layers.append(linear)
            if acts[i] == 'relu':
                act = paddle.nn.ReLU()
                self.add_sublayer('act_%d' % i, act)
                self._ctr_mlp_layers.append(act)

        # cvr part
        cvr_sizes = [sparse_feature_dim * num_field
                     ] + self.cvr_layer_sizes + [2]
        acts = ["relu" for _ in range(len(self.cvr_layer_sizes))] + [None]
        self._cvr_mlp_layers = []
        for i in range(len(cvr_layer_sizes) + 1):
            linear = paddle.nn.Linear(
                in_features=cvr_sizes[i],
                out_features=cvr_sizes[i + 1],
                weight_attr=paddle.ParamAttr(
                    initializer=paddle.nn.initializer.Normal(
                        std=1.0 / math.sqrt(cvr_sizes[i]))))
            self.add_sublayer('linear_%d' % (len(ctr_layer_sizes) + 1 + i),
                              linear)
            self._cvr_mlp_layers.append(linear)
            if acts[i] == 'relu':
                act = paddle.nn.ReLU()
                self.add_sublayer('act_%d' % (len(ctr_layer_sizes) + 1 + i),
                                  act)
                self._cvr_mlp_layers.append(act)
Code example #21
File: model.py Project: PaddlePaddle/Contrib
 def __init__(self, input_size, output_size):
     super(CustomGCNConv, self).__init__()
     self.input_size = input_size
     self.output_size = output_size
     weight_attr = paddle.ParamAttr(
         initializer=paddle.nn.initializer.XavierUniform())
     bias_attr = paddle.ParamAttr(
         initializer=paddle.nn.initializer.XavierUniform(
             fan_in=1, fan_out=output_size))
     self.linear = nn.Linear(input_size, output_size, weight_attr,
                             bias_attr)
Code example #22
    def __init__(self,
                 word_emb_dim,
                 hidden_size,
                 vocab_size,
                 num_labels,
                 emb_lr=2.0,
                 crf_lr=0.2,
                 with_start_stop_tag=True):
        super(BiGruCrf, self).__init__()
        self.word_emb_dim = word_emb_dim
        self.vocab_size = vocab_size
        self.num_labels = num_labels
        self.hidden_size = hidden_size
        self.emb_lr = emb_lr
        self.crf_lr = crf_lr
        self.init_bound = 0.1

        self.word_embedding = nn.Embedding(
            num_embeddings=self.vocab_size,
            embedding_dim=self.word_emb_dim,
            weight_attr=paddle.ParamAttr(learning_rate=self.emb_lr,
                                         initializer=nn.initializer.Uniform(
                                             low=-self.init_bound,
                                             high=self.init_bound)))

        self.gru = nn.GRU(
            input_size=self.word_emb_dim,
            hidden_size=self.hidden_size,
            num_layers=2,
            direction='bidirectional',
            weight_ih_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(low=-self.init_bound,
                                                   high=self.init_bound),
                regularizer=paddle.regularizer.L2Decay(coeff=1e-4)),
            weight_hh_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(low=-self.init_bound,
                                                   high=self.init_bound),
                regularizer=paddle.regularizer.L2Decay(coeff=1e-4)))

        self.fc = nn.Linear(
            in_features=self.hidden_size * 2,
            out_features=self.num_labels + 2 \
                if with_start_stop_tag else self.num_labels,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.Uniform(
                    low=-self.init_bound, high=self.init_bound),
                regularizer=paddle.regularizer.L2Decay(coeff=1e-4)))

        self.crf = LinearChainCrf(self.num_labels, self.crf_lr,
                                  with_start_stop_tag)
        self.crf_loss = LinearChainCrfLoss(self.crf)
        self.viterbi_decoder = ViterbiDecoder(self.crf.transitions,
                                              with_start_stop_tag)
Code example #23
    def __init__(self,
                 num_convs=4,
                 in_channels=256,
                 out_channels=256,
                 norm_type=None):
        super(MaskFeat, self).__init__()
        self.num_convs = num_convs
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.norm_type = norm_type
        fan_conv = out_channels * 3 * 3
        fan_deconv = out_channels * 2 * 2

        mask_conv = nn.Sequential()
        if norm_type == 'gn':
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    ConvNormLayer(
                        ch_in=in_channels if i == 0 else out_channels,
                        ch_out=out_channels,
                        filter_size=3,
                        stride=1,
                        norm_type=self.norm_type,
                        norm_name=conv_name + '_norm',
                        initializer=KaimingNormal(fan_in=fan_conv),
                        name=conv_name))
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        else:
            for i in range(self.num_convs):
                conv_name = 'mask_inter_feat_{}'.format(i + 1)
                mask_conv.add_sublayer(
                    conv_name,
                    nn.Conv2D(
                        in_channels=in_channels if i == 0 else out_channels,
                        out_channels=out_channels,
                        kernel_size=3,
                        padding=1,
                        weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                            fan_in=fan_conv))))
                mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
        mask_conv.add_sublayer(
            'conv5_mask',
            nn.Conv2DTranspose(
                in_channels=self.in_channels,
                out_channels=self.out_channels,
                kernel_size=2,
                stride=2,
                weight_attr=paddle.ParamAttr(initializer=KaimingNormal(
                    fan_in=fan_deconv))))
        mask_conv.add_sublayer('conv5_mask' + 'act', nn.ReLU())
        self.upsample = mask_conv
Code example #24
 def __init__(self, weight, output_size, global_dtype):
     super(SimpleMatmul, self).__init__()
     self.weight = paddle.create_parameter(
         shape=weight.shape,
         dtype=global_dtype,
         attr=paddle.ParamAttr(
             initializer=paddle.nn.initializer.Assign(weight)))
     self.bias = self.create_parameter(
         shape=[output_size],
         dtype=global_dtype,
         attr=paddle.ParamAttr(
             initializer=paddle.nn.initializer.Constant(0.0)))
Code example #25
 def __init__(self, norm_shape=768, name=''):
     super(NormalizeLayer, self).__init__()
     self.name = name
     self.LayerNormal = nn.LayerNorm(
         norm_shape,
         epsilon=1e-05,
         weight_attr=paddle.ParamAttr(
             name=self.name + '_layer_norm_scale',
             initializer=nn.initializer.Constant(1.)),
         bias_attr=paddle.ParamAttr(
             name=self.name + '_layer_norm_bias',
             initializer=nn.initializer.Constant(0.)))
Code example #26
 def __init__(self, in_features, out_features):
     super(Softmax, self).__init__()
     self.in_features = in_features
     self.out_features = out_features
     weight_arr = paddle.ParamAttr(
         initializer=paddle.nn.initializer.XavierUniform())
     bias_attr = paddle.ParamAttr(
         initializer=paddle.nn.initializer.Constant())
     self.linear = paddle.nn.Linear(in_features,
                                    out_features,
                                    weight_attr=weight_arr,
                                    bias_attr=bias_attr)
Code example #27
File: resnet.py Project: GuoxiaWang/PLSC
    def build_network(self, input, is_train=True):
        layers = self.layers
        supported_layers = [50, 100, 101, 152]
        assert layers in supported_layers, \
            "supported layers {}, but given {}".format(supported_layers, layers)

        depth = None
        if layers == 50:
            depth = [3, 4, 14, 3]
        elif layers == 100:
            depth = [3, 13, 30, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        conv = self.conv_bn_layer(input=input.image,
                                  num_filters=64,
                                  filter_size=3,
                                  stride=1,
                                  pad=1,
                                  act='prelu',
                                  is_train=is_train)

        for block in range(len(depth)):
            for i in range(depth[block]):
                conv = self.bottleneck_block(input=conv,
                                             num_filters=num_filters[block],
                                             stride=2 if i == 0 else 1,
                                             is_train=is_train)

        bn = paddle.static.nn.batch_norm(input=conv,
                                         act=None,
                                         epsilon=2e-05,
                                         is_test=False if is_train else True)
        drop = paddle.nn.functional.dropout(x=bn,
                                            p=0.4,
                                            training=is_train,
                                            mode='upscale_in_train')
        fc = paddle.static.nn.fc(
            x=drop,
            size=self.emb_dim,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.XavierNormal(fan_in=0.0)),
            bias_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Constant()))
        emb = paddle.static.nn.batch_norm(input=fc,
                                          act=None,
                                          epsilon=2e-05,
                                          is_test=False if is_train else True)
        return emb
Code example #28
File: conv.py Project: WenjinW/PGL
 def __init__(self, input_size, output_size):
     super(NGCFConv, self).__init__()
     self.input_size = input_size
     self.output_size = output_size
     weight_attr = paddle.ParamAttr(
         initializer=nn.initializer.XavierUniform())
     bias_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform(
         fan_in=1, fan_out=output_size))
     self.linear = nn.Linear(input_size, output_size, weight_attr,
                             bias_attr)
     self.linear2 = nn.Linear(input_size, output_size, weight_attr,
                              bias_attr)
     self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
Code example #29
File: model64.py Project: adjaisd/DCGAN-paddle2.0
 def _block(self, in_channels, out_channels, kernel_size, stride, padding):
     return nn.Sequential(
         nn.Conv2DTranspose(
             in_channels, out_channels, kernel_size, stride, padding, bias_attr=False, 
             weight_attr=paddle.ParamAttr(initializer=conv_initializer() )
         ),
         nn.BatchNorm2D(
             out_channels, 
             weight_attr=paddle.ParamAttr(initializer=bn_initializer() ) ,
             momentum=0.8
         ),
         nn.ReLU(),
     )
Code example #30
File: model.py Project: tianxin1860/PaddleNLP
    def __init__(self):
        super(ReduceState, self).__init__()

        self.reduce_h = nn.Linear(
            config.hidden_dim * 2,
            config.hidden_dim,
            weight_attr=paddle.ParamAttr(initializer=I.Normal(
                std=config.trunc_norm_init_std)))
        self.reduce_c = nn.Linear(
            config.hidden_dim * 2,
            config.hidden_dim,
            weight_attr=paddle.ParamAttr(initializer=I.Normal(
                std=config.trunc_norm_init_std)))