Example #1
 def static_graph_case_1(self):
     main = fluid.Program()
     start = fluid.Program()
     with fluid.unique_name.guard():
         with fluid.program_guard(main, start):
             if self.channel_last:
                 x = fluid.data("input", (-1, -1, -1, self.in_channels),
                                dtype=self.dtype)
             else:
                 x = fluid.data("input", (-1, self.in_channels, -1, -1),
                                dtype=self.dtype)
             y = fluid.layers.conv2d_transpose(
                 x,
                 self.out_channels,
                 output_size=self.output_size,
                 filter_size=self.filter_shape,
                 stride=self.stride,
                 padding=self.padding,
                 dilation=self.dilation,
                 groups=self.groups,
                 param_attr=I.NumpyArrayInitializer(self.weight),
                 bias_attr=False
                 if self.no_bias else I.NumpyArrayInitializer(self.bias),
                 data_format=self.data_format)
     exe = fluid.Executor(self.place)
     exe.run(start)
     out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
     return out
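These snippets are excerpted from test classes and model code, so they reference self.* attributes and omit their imports. A minimal sketch of the module aliases the examples appear to assume (inferred from usage of the legacy, pre-2.0 fluid API; the exact import paths are assumptions):

    # Aliases inferred from how the snippets use them; treat the exact
    # paths as assumptions, not the original files' import lines.
    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.dygraph as dg
    import paddle.fluid.initializer as I
    import paddle.nn as nn
    import paddle.nn.functional as F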
Example #2
 def fluid_layer(self, place):
     main = fluid.Program()
     start = fluid.Program()
     with fluid.unique_name.guard():
         with fluid.program_guard(main, start):
             input_shape = (-1, -1, -1, -1, self.num_channels) \
                 if self.channel_last else (-1, self.num_channels, -1, -1, -1)
             x_var = fluid.data("input", input_shape, dtype=self.dtype)
             weight_attr = I.NumpyArrayInitializer(self.weight)
             if self.bias is None:
                 bias_attr = False
             else:
                 bias_attr = I.NumpyArrayInitializer(self.bias)
             y_var = fluid.layers.conv3d_transpose(
                 x_var,
                 self.num_filters,
                 filter_size=self.filter_size,
                 output_size=self.output_size,
                 padding=self.padding,
                 stride=self.stride,
                 dilation=self.dilation,
                 groups=self.groups,
                 param_attr=weight_attr,
                 bias_attr=bias_attr,
                 use_cudnn=self.use_cudnn,
                 act=self.act,
                 data_format=self.data_format)
     feed_dict = {"input": self.input}
     exe = fluid.Executor(place)
     exe.run(start)
     y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
     return y_np
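For comparison, a hedged sketch of the dygraph counterpart such a fluid_layer reference is usually checked against, using paddle.nn.Conv3DTranspose with the Assign initializer (the 2.x name for NumpyArrayInitializer); the exact wiring of the self.* values is an assumption:

    # Sketch only: a plausible dygraph counterpart of the fluid_layer above.
    conv = paddle.nn.Conv3DTranspose(
        self.num_channels,
        self.num_filters,
        self.filter_size,
        stride=self.stride,
        padding=self.padding,
        dilation=self.dilation,
        groups=self.groups,
        weight_attr=paddle.ParamAttr(
            initializer=paddle.nn.initializer.Assign(self.weight)),
        bias_attr=False if self.bias is None else paddle.ParamAttr(
            initializer=paddle.nn.initializer.Assign(self.bias)),
        data_format=self.data_format)
    y = conv(paddle.to_tensor(self.input), output_size=self.output_size)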
Example #3
 def fluid_layer(self, place):
     main = fluid.Program()
     start = fluid.Program()
     with fluid.unique_name.guard():
         with fluid.program_guard(main, start):
             x = fluid.data("input", [-1, self.feature_size],
                            dtype=self.dtype)
             label = fluid.data("labels", [-1, 1], dtype="int64")
             if self.is_custom:
                 path_table = fluid.data("path_table", [-1, -1],
                                         dtype="int64")
                 path_code = fluid.data("path_code", [-1, -1],
                                        dtype="int64")
             else:
                 path_table = path_code = None
             y = fluid.layers.hsigmoid(
                 x,
                 label,
                 self.num_classes,
                 param_attr=I.NumpyArrayInitializer(self.weight),
                 bias_attr=I.NumpyArrayInitializer(self.bias),
                 path_table=path_table,
                 path_code=path_code,
                 is_custom=self.is_custom,
                 is_sparse=self.is_sparse,
             )
     exe = fluid.Executor(place)
     exe.run(start)
     feed_dict = {"input": self.input, "labels": self.labels}
     if self.is_custom:
         feed_dict["path_code"] = self.path_code
         feed_dict["path_table"] = self.path_table
     y_np, = exe.run(main, feed=feed_dict, fetch_list=[y])
     return y_np
Example #4
    def test_fluid_api(self):
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            x = fluid.data('x', [-1, self.feature_size])
            labels = fluid.data('labels', [-1, 1], 'int64')
            path_table = None
            path_code = None
            if self.is_custom:
                path_table = fluid.data('path_table', [-1, -1], 'int64')
                path_code = fluid.data('path_code', [-1, -1], 'int64')
            weight_attr = I.NumpyArrayInitializer(self.weight_np)
            bias_attr = I.NumpyArrayInitializer(self.bias_np)
            out = fluid.layers.hsigmoid(x, labels, self.num_classes,
                                        weight_attr, bias_attr, 'out',
                                        path_table, path_code, self.is_custom)

            exe = fluid.Executor(self.place)
            exe.run(startup_program)
            feed_dict = {'x': self.x_np, 'labels': self.labels_np}
            if self.is_custom:
                feed_dict["path_code"] = self.path_code_np
                feed_dict["path_table"] = self.path_table_np
            ret, = exe.run(train_program, feed=feed_dict, fetch_list=[out])

            self.assertTrue(np.allclose(ret, self.out_np))
Example #5
    def test_static_api(self):
        train_program = paddle.static.Program()
        startup_program = paddle.static.Program()
        with paddle.static.program_guard(train_program, startup_program):
            x = paddle.static.data('x', [-1, self.feature_size])
            labels = paddle.static.data('labels', [-1, 1], 'int64')
            weight = paddle.static.data('weight', [-1, self.feature_size])
            bias = paddle.static.data('bias', [-1])
            path_table = None
            path_code = None
            if self.is_custom:
                path_table = paddle.static.data('path_table', [-1, -1],
                                                'int64')
                path_code = paddle.static.data('path_code', [-1, -1], 'int64')
            out1 = F.hsigmoid_loss(x, labels, self.num_classes, weight, bias,
                                   path_table, path_code)

            weight_attr = paddle.framework.ParamAttr(
                initializer=I.NumpyArrayInitializer(self.weight_np))
            bias_attr = paddle.framework.ParamAttr(
                initializer=I.NumpyArrayInitializer(self.bias_np))
            m = paddle.nn.HSigmoidLoss(self.feature_size, self.num_classes,
                                       weight_attr, bias_attr, self.is_custom)
            out2 = m(x, labels, path_table, path_code)

            exe = paddle.static.Executor(self.place)
            exe.run(startup_program)
            feed_dict = {
                'x': self.x_np,
                'labels': self.labels_np,
                'weight': self.weight_np,
                'bias': self.bias_np
            }
            if self.is_custom:
                feed_dict["path_code"] = self.path_code_np
                feed_dict["path_table"] = self.path_table_np
            ret1, ret2 = exe.run(train_program,
                                 feed=feed_dict,
                                 fetch_list=[out1, out2])

            for ret in [ret1, ret2]:
                self.assertTrue(np.allclose(self.out_np, ret))
Example #6
 def nn_layer(self, place):
     with dg.guard(place):
         x_var = dg.to_variable(self.input)
         conv = nn.RowConv(self.num_channels,
                           self.context_size,
                           param_attr=I.NumpyArrayInitializer(self.weight),
                           act=self.act,
                           dtype=self.dtype)
         y_var = conv(x_var)
         y_np = y_var.numpy()
     return y_np
Example #7
 def nn_layer(self):
     x_var = dg.to_variable(self.input)
     label_var = dg.to_variable(self.labels)
     if self.is_custom:
         path_code_var = dg.to_variable(self.path_code)
         path_table_var = dg.to_variable(self.path_table)
     else:
         path_code_var = path_table_var = None
     hierarchical_softmax = nn.HSigmoid(
         self.feature_size,
         self.num_classes,
         is_custom=self.is_custom,
         param_attr=I.NumpyArrayInitializer(self.weight),
         bias_attr=I.NumpyArrayInitializer(self.bias),
         dtype=self.dtype)
     y_var = hierarchical_softmax(x_var,
                                  label_var,
                                  path_table=path_table_var,
                                  path_code=path_code_var)
     y_np = y_var.numpy()
     return y_np
Example #8
    def fluid_layer(self, place):
        main = fluid.Program()
        start = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, start):
                input_shape = (-1, -1, -1, self.num_channels) \
                    if self.channel_last else (-1, self.num_channels, -1, -1)
                x_var = fluid.data("input", input_shape, dtype=self.dtype)
                weight_attr = I.NumpyArrayInitializer(self.weight)
                if self.bias is None:
                    bias_attr = False
                else:
                    bias_attr = I.NumpyArrayInitializer(self.bias)
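                # non-zero padding modes are emulated with an explicit F.pad
                # on the input, after which conv2d runs with zero padding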
                if self.padding_mode != 'zeros':
                    x_var = F.pad(x_var,
                                  self._reversed_padding_repeated_twice,
                                  mode=self.padding_mode,
                                  data_format=self.data_format)
                    padding = 0
                else:
                    padding = self.padding

                y_var = fluid.layers.conv2d(
                    x_var,
                    self.num_filters,
                    self.filter_size,
                    padding=padding,
                    stride=self.stride,
                    dilation=self.dilation,
                    groups=self.groups,
                    param_attr=weight_attr,
                    bias_attr=bias_attr,
                    data_format=self.data_format)

        feed_dict = {"input": self.input}
        exe = fluid.Executor(place)
        exe.run(start)
        y_np, = exe.run(main, feed=feed_dict, fetch_list=[y_var])
        return y_np
Example #9
    def __init__(self,
                 attention_dim,
                 input_dim,
                 position_encoding_weight=1.,
                 position_rate=1.,
                 reduction_factor=1,
                 has_bias=False,
                 bias_dim=0,
                 keep_prob=1.):
        super(AttentionBlock, self).__init__()
        # positional encoding
        omega_default = position_rate / reduction_factor
        self.omega_default = omega_default
        # multispeaker case
        if has_bias:
            std = np.sqrt(1.0 / bias_dim)
            initializer = I.NormalInitializer(loc=0., scale=std)
            self.q_pos_affine = dg.Linear(bias_dim, 1, param_attr=initializer)
            self.k_pos_affine = dg.Linear(bias_dim, 1, param_attr=initializer)
            self.omega_initial = self.create_parameter(
                shape=[1], attr=I.ConstantInitializer(value=omega_default))

        # note that q, k, v share the same feature dimension, so k_affine
        # and q_affine can be initialized with the same weight matrix to
        # get a better initial attention
        init_weight = np.random.normal(size=(input_dim, attention_dim),
                                       scale=np.sqrt(1. / input_dim))
        initializer = I.NumpyArrayInitializer(init_weight.astype(np.float32))
        # 3 affine transformations to project q, k, v into attention_dim
        q_affine = dg.Linear(input_dim, attention_dim, param_attr=initializer)
        self.q_affine = weight_norm(q_affine, dim=-1)
        k_affine = dg.Linear(input_dim, attention_dim, param_attr=initializer)
        self.k_affine = weight_norm(k_affine, dim=-1)

        std = np.sqrt(1.0 / input_dim)
        initializer = I.NormalInitializer(loc=0., scale=std)
        v_affine = dg.Linear(input_dim, attention_dim, param_attr=initializer)
        self.v_affine = weight_norm(v_affine, dim=-1)

        std = np.sqrt(1.0 / attention_dim)
        initializer = I.NormalInitializer(loc=0., scale=std)
        out_affine = dg.Linear(attention_dim,
                               input_dim,
                               param_attr=initializer)
        self.out_affine = weight_norm(out_affine, dim=-1)

        self.keep_prob = keep_prob
        self.has_bias = has_bias
        self.bias_dim = bias_dim
        self.attention_dim = attention_dim
        self.position_encoding_weight = position_encoding_weight
Example #10
 def static_graph_case(self):
     main = fluid.Program()
     start = fluid.Program()
     with fluid.unique_name.guard():
         with fluid.program_guard(main, start):
             x = fluid.data("input", self.input.shape, dtype=paddle.float32)
             y = fluid.layers.conv2d(
                 x,
                 self.num_filters,
                 self.filter_size,
                 stride=self.stride,
                 padding=self.padding,
                 dilation=self.dilation,
                 groups=self.groups,
                 param_attr=I.NumpyArrayInitializer(self.filter),
                 bias_attr=False if self.bias is None else
                 I.NumpyArrayInitializer(self.bias),
                 act=None,
                 data_format=self.data_format)
     exe = fluid.Executor()
     exe.run(start)
     out, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
     return out
Example #11
    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        labels = paddle.to_tensor(self.labels_np)
        weight = paddle.to_tensor(self.weight_np)
        bias = paddle.to_tensor(self.bias_np)
        path_table = None
        path_code = None
        if self.is_custom:
            path_table = paddle.to_tensor(self.path_table_np)
            path_code = paddle.to_tensor(self.path_code_np)
        out1 = F.hsigmoid_loss(x, labels, self.num_classes, weight, bias,
                               path_table, path_code)

        weight_attr = I.NumpyArrayInitializer(self.weight_np)
        bias_attr = I.NumpyArrayInitializer(self.bias_np)
        m = paddle.nn.HSigmoidLoss(self.feature_size, self.num_classes,
                                   weight_attr, bias_attr, self.is_custom)
        out2 = m(x, labels, path_table, path_code)

        for out in [out1, out2]:
            self.assertTrue(np.allclose(self.out_np, out.numpy()))
        paddle.enable_static()
Example #12
 def fluid_layer(self, place):
     main = fluid.Program()
     start = fluid.Program()
     with fluid.unique_name.guard():
         with fluid.program_guard(main, start):
             x = fluid.data("input", [-1, -1, self.num_channels],
                            dtype=self.dtype)
             y = fluid.layers.row_conv(x,
                                       self.context_size,
                                       param_attr=I.NumpyArrayInitializer(
                                           self.weight),
                                       act=self.act)
     exe = fluid.Executor(place)
     exe.run(start)
     y_np, = exe.run(main, feed={"input": self.input}, fetch_list=[y])
     return y_np
Example #13
 def test_numpy_array_initializer(self):
     """Test the numpy array initializer with supplied arguments
     """
     import numpy
     program = framework.Program()
     block = program.global_block()
     np_array = numpy.random.random((10000,)).astype("float32")
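     # create the same named parameter twice; only a single assign_value
     # init op should end up recorded in the block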
     for _ in range(2):
         block.create_parameter(
             dtype=np_array.dtype,
             shape=np_array.shape,
             lod_level=0,
             name="param",
             initializer=initializer.NumpyArrayInitializer(np_array))
     self.assertEqual(len(block.ops), 1)
     init_op = block.ops[0]
     self.assertEqual(init_op.type, 'assign_value')
     assert (init_op.attr('fp32_values') == np_array).all()
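Outside the unit test, a minimal sketch of the usual way NumpyArrayInitializer is applied: seeding a layer's weight through a ParamAttr (the layer choice and shapes here are illustrative assumptions):

    import numpy as np
    import paddle
    import paddle.fluid as fluid
    import paddle.fluid.initializer as I

    paddle.enable_static()  # on paddle 2.x the fluid API is static-graph only
    weight = np.random.random((8, 4)).astype("float32")
    main, start = fluid.Program(), fluid.Program()
    with fluid.program_guard(main, start):
        x = fluid.data("x", [-1, 8], dtype="float32")
        # the fc weight is initialized from the numpy array
        y = fluid.layers.fc(x, size=4,
                            param_attr=fluid.ParamAttr(
                                initializer=I.NumpyArrayInitializer(weight)))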
Example #14
    def __init__(self, args, pretrained_embed=None):
        super(Model, self).__init__()
        self.args = args
        # the embedding layer
        self.word_embed = dygraph.Embedding(size=(args.n_words, args.n_embed))

        if args.pretrained_embed_shape is not None:
            if pretrained_embed is not None:
                pre_param_attrs = fluid.ParamAttr(
                    name="pretrained_emb",
                    initializer=initializer.NumpyArrayInitializer(
                        pretrained_embed),
                    trainable=True)
                self.pretrained = dygraph.Embedding(
                    size=args.pretrained_embed_shape,
                    param_attr=pre_param_attrs)
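                # replace the trainable word embedding's weight with a
                # zero-initialized parameter (a common trick so the
                # pretrained vectors dominate early in training)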
                self.word_embed.weight = layers.create_parameter(
                    shape=(self.args.n_words, self.args.n_embed),
                    dtype='float32',
                    default_initializer=initializer.Constant(value=0.0))
            else:
                self.pretrained = dygraph.Embedding(
                    size=args.pretrained_embed_shape)
        # initialize the feat embedding; feat can be char or pos
        if args.feat == 'char':
            self.feat_embed = CharLSTM(n_chars=args.n_feats,
                                       n_embed=args.n_char_embed,
                                       n_out=args.n_feat_embed,
                                       pad_index=args.feat_pad_index)
        else:
            self.feat_embed = dygraph.Embedding(size=(args.n_feats,
                                                      args.n_feat_embed))
        self.embed_dropout = IndependentDropout(p=args.embed_dropout)

        # lstm layer
        self.lstm = BiLSTM(input_size=args.n_embed + args.n_feat_embed,
                           hidden_size=args.n_lstm_hidden,
                           num_layers=args.n_lstm_layers,
                           dropout=args.lstm_dropout)
        self.lstm_dropout = SharedDropout(p=args.lstm_dropout)

        # mlp layer
        self.mlp_arc_h = MLP(n_in=args.n_lstm_hidden * 2,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_arc_d = MLP(n_in=args.n_lstm_hidden * 2,
                             n_out=args.n_mlp_arc,
                             dropout=args.mlp_dropout)
        self.mlp_rel_h = MLP(n_in=args.n_lstm_hidden * 2,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)
        self.mlp_rel_d = MLP(n_in=args.n_lstm_hidden * 2,
                             n_out=args.n_mlp_rel,
                             dropout=args.mlp_dropout)

        # biaffine layers
        self.arc_attn = Biaffine(n_in=args.n_mlp_arc,
                                 bias_x=True,
                                 bias_y=False)
        self.rel_attn = Biaffine(n_in=args.n_mlp_rel,
                                 n_out=args.n_rels,
                                 bias_x=True,
                                 bias_y=True)
        self.pad_index = args.pad_index
        self.unk_index = args.unk_index