Example #1
        def parse_new_rnn():
            reset_parser()
            data = layer.data(
                name="word", type=data_type.dense_vector(dict_dim))
            label = layer.data(
                name="label", type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(
                name="boot", type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(
                    name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(
                name="rnn", step=step, input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #2
    def test_operator(self):
        ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
        ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
        dotmul0 = layer.mixed(input=dotmul_op)
        with layer.mixed() as dotmul1:
            dotmul1 += dotmul_op

        conv = layer.conv_operator(img=ipt0,
                                   filter=ipt1,
                                   filter_size=1,
                                   num_channels=1,
                                   num_filters=128,
                                   stride=1,
                                   padding=0)
        conv0 = layer.mixed(input=conv)
        with layer.mixed() as conv1:
            conv1 += conv

        print(layer.parse_network(dotmul0))
        print(layer.parse_network(dotmul1))
        print(layer.parse_network(conv0))
        print(layer.parse_network(conv1))
Example #3
    def test_operator(self):
        ipt0 = layer.data(name='data1', type=data_type.dense_vector(784))
        ipt1 = layer.data(name='word1', type=data_type.dense_vector(128))
        fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

        dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
        dotmul0 = layer.mixed(input=dotmul_op)
        with layer.mixed() as dotmul1:
            dotmul1 += dotmul_op

        conv = layer.conv_operator(
            img=ipt0,
            filter=ipt1,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv)
        with layer.mixed() as conv1:
            conv1 += conv

        print(layer.parse_network(dotmul0))
        print(layer.parse_network(dotmul1))
        print(layer.parse_network(conv0))
        print(layer.parse_network(conv1))
Example #4
        def parse_new_rnn():
            data = layer.data(name="word",
                              type=data_type.dense_vector(dict_dim))
            label = layer.data(name="label",
                               type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(name="boot",
                                    type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(name="rnn_state",
                                   size=hidden_dim,
                                   boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(name="rnn",
                                        step=step,
                                        input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #5
    def network(self):
        """
        Implements the whole network of Match-LSTM.

        Returns:
            A tuple of LayerOutput objects containing the start and end
            probability distributions respectively.
        """
        self.check_and_create_data()
        self.create_shared_params()
        q_enc = self.get_enc(self.q_ids, type='q')
        p_encs = []
        p_matches = []
        for p in self.p_ids:
            p_encs.append(self.get_enc(p, type='p'))

        q_proj_left = layer.fc(size=self.emb_dim * 2,
                               bias_attr=False,
                               param_attr=Attr.Param(
                                   self.name + '_left_' + '.wq'),
                               input=q_enc)
        q_proj_right = layer.fc(size=self.emb_dim * 2,
                                bias_attr=False,
                                param_attr=Attr.Param(
                                    self.name + '_right_' + '.wq'),
                                input=q_enc)
        for i, p in enumerate(p_encs):
            left_out = self.recurrent_group(
                       self.name + '_left_' + str(i),
                       [layer.StaticInput(q_enc),
                           layer.StaticInput(q_proj_left), p],
                       reverse=False)
            right_out = self.recurrent_group(
                        self.name + '_right_' + str(i),
                        [layer.StaticInput(q_enc),
                            layer.StaticInput(q_proj_right), p],
                        reverse=True)
            match_seq = layer.concat(input=[left_out, right_out])
            match_seq_dropped = self.drop_out(match_seq, drop_rate=0.5)
            bi_match_seq = paddle.networks.bidirectional_lstm(
                    input=match_seq_dropped,
                    size=match_seq.size,
                    fwd_mat_param_attr=Attr.Param('pn_f_enc_mat.w'),
                    fwd_bias_param_attr=Attr.Param('pn_f_enc.bias',
                        initial_std=0.),
                    fwd_inner_param_attr=Attr.Param('pn_f_enc_inn.w'),
                    bwd_mat_param_attr=Attr.Param('pn_b_enc_mat.w'),
                    bwd_bias_param_attr=Attr.Param('pn_b_enc.bias',
                        initial_std=0.),
                    bwd_inner_param_attr=Attr.Param('pn_b_enc_inn.w'),
                    return_seq=True)
            p_matches.append(bi_match_seq)

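        # `reduce` is a builtin on Python 2; on Python 3 this needs
        # `from functools import reduce`.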
        all_docs = reduce(lambda x, y: layer.seq_concat(a=x, b=y), p_matches)
        all_docs_dropped = self.drop_out(all_docs, drop_rate=0.5)
        start = self.decode('start', all_docs_dropped)
        end = self.decode('end', all_docs_dropped)
        return start, end
Example #6
    def network(self):
        """
        Implements the whole network of Match-LSTM.

        Returns:
            A tuple of LayerOutput objects containing the start and end
            probability distributions respectively.
        """
        self.check_and_create_data()
        self.create_shared_params()
        q_enc = self.get_enc(self.q_ids, type='q')
        p_encs = []
        p_matches = []
        for p in self.p_ids:
            p_encs.append(self.get_enc(p, type='p'))

        q_proj_left = layer.fc(size=self.emb_dim * 2,
                               bias_attr=False,
                               param_attr=Attr.Param(self.name + '_left_' +
                                                     '.wq'),
                               input=q_enc)
        q_proj_right = layer.fc(size=self.emb_dim * 2,
                                bias_attr=False,
                                param_attr=Attr.Param(self.name + '_right_' +
                                                      '.wq'),
                                input=q_enc)
        for i, p in enumerate(p_encs):
            left_out = self.recurrent_group(
                self.name + '_left_' + str(i),
                [layer.StaticInput(q_enc),
                 layer.StaticInput(q_proj_left), p],
                reverse=False)
            right_out = self.recurrent_group(
                self.name + '_right_' + str(i),
                [layer.StaticInput(q_enc),
                 layer.StaticInput(q_proj_right), p],
                reverse=True)
            match_seq = layer.concat(input=[left_out, right_out])
            match_seq_dropped = self.drop_out(match_seq, drop_rate=0.5)
            bi_match_seq = paddle.networks.bidirectional_lstm(
                input=match_seq_dropped,
                size=match_seq.size,
                fwd_mat_param_attr=Attr.Param('pn_f_enc_mat.w'),
                fwd_bias_param_attr=Attr.Param('pn_f_enc.bias',
                                               initial_std=0.),
                fwd_inner_param_attr=Attr.Param('pn_f_enc_inn.w'),
                bwd_mat_param_attr=Attr.Param('pn_b_enc_mat.w'),
                bwd_bias_param_attr=Attr.Param('pn_b_enc.bias',
                                               initial_std=0.),
                bwd_inner_param_attr=Attr.Param('pn_b_enc_inn.w'),
                return_seq=True)
            p_matches.append(bi_match_seq)

        all_docs = reduce(lambda x, y: layer.seq_concat(a=x, b=y), p_matches)
        all_docs_dropped = self.drop_out(all_docs, drop_rate=0.5)
        start = self.decode('start', all_docs_dropped)
        end = self.decode('end', all_docs_dropped)
        return start, end
Example #7
    def test_projection(self):
        input = layer.data(name='data', type=data_type.dense_vector(784))
        word = layer.data(
            name='word', type=data_type.integer_value_sequence(10000))
        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
        mixed0 = layer.mixed(
            size=256,
            input=[
                layer.full_matrix_projection(input=fc0),
                layer.full_matrix_projection(input=fc1)
            ])
        with layer.mixed(size=200) as mixed1:
            mixed1 += layer.full_matrix_projection(input=fc0)
            mixed1 += layer.identity_projection(input=fc1)
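        # `input=` at construction time and `+=` inside the context manager
        # are two equivalent ways to add projections to a mixed layer.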

        table = layer.table_projection(input=word)
        emb0 = layer.mixed(size=512, input=table)
        with layer.mixed(size=512) as emb1:
            emb1 += table

        scale = layer.scaling_projection(input=fc0)
        scale0 = layer.mixed(size=100, input=scale)
        with layer.mixed(size=100) as scale1:
            scale1 += scale

        dotmul = layer.dotmul_projection(input=fc0)
        dotmul0 = layer.mixed(size=100, input=dotmul)
        with layer.mixed(size=100) as dotmul1:
            dotmul1 += dotmul

        context = layer.context_projection(input=fc0, context_len=5)
        context0 = layer.mixed(size=100, input=context)
        with layer.mixed(size=100) as context1:
            context1 += context

        conv = layer.conv_projection(
            input=input,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv, bias_attr=True)
        with layer.mixed(bias_attr=True) as conv1:
            conv1 += conv

        print(layer.parse_network(mixed0))
        print(layer.parse_network(mixed1))
        print(layer.parse_network(emb0))
        print(layer.parse_network(emb1))
        print(layer.parse_network(scale0))
        print(layer.parse_network(scale1))
        print(layer.parse_network(dotmul0))
        print(layer.parse_network(dotmul1))
        print(layer.parse_network(conv0))
        print(layer.parse_network(conv1))
Example #8
    def test_projection(self):
        input = layer.data(name='data2', type=data_type.dense_vector(784))
        word = layer.data(
            name='word2', type=data_type.integer_value_sequence(10000))
        fc0 = layer.fc(input=input, size=100, act=activation.Sigmoid())
        fc1 = layer.fc(input=input, size=200, act=activation.Sigmoid())
        mixed0 = layer.mixed(
            size=256,
            input=[
                layer.full_matrix_projection(input=fc0),
                layer.full_matrix_projection(input=fc1)
            ])
        with layer.mixed(size=200) as mixed1:
            mixed1 += layer.full_matrix_projection(input=fc0)
            mixed1 += layer.identity_projection(input=fc1)

        table = layer.table_projection(input=word)
        emb0 = layer.mixed(size=512, input=table)
        with layer.mixed(size=512) as emb1:
            emb1 += table

        scale = layer.scaling_projection(input=fc0)
        scale0 = layer.mixed(size=100, input=scale)
        with layer.mixed(size=100) as scale1:
            scale1 += scale

        dotmul = layer.dotmul_projection(input=fc0)
        dotmul0 = layer.mixed(size=100, input=dotmul)
        with layer.mixed(size=100) as dotmul1:
            dotmul1 += dotmul

        context = layer.context_projection(input=fc0, context_len=5)
        context0 = layer.mixed(size=500, input=context)
        with layer.mixed(size=500) as context1:
            context1 += context

        conv = layer.conv_projection(
            input=input,
            filter_size=1,
            num_channels=1,
            num_filters=128,
            stride=1,
            padding=0)
        conv0 = layer.mixed(input=conv, bias_attr=True)
        with layer.mixed(bias_attr=True) as conv1:
            conv1 += conv

        print(layer.parse_network(mixed0))
        print(layer.parse_network(mixed1))
        print(layer.parse_network(emb0))
        print(layer.parse_network(emb1))
        print(layer.parse_network(scale0))
        print(layer.parse_network(scale1))
        print(layer.parse_network(dotmul0))
        print(layer.parse_network(dotmul1))
        print(layer.parse_network(conv0))
        print(layer.parse_network(conv1))
Example #9
 def _build_dnn_submodel_(self, dnn_layer_dims):
     '''
     build DNN submodel.
     '''
     dnn_embedding = layer.fc(input=self.dnn_merged_input,
                              size=dnn_layer_dims[0])
     _input_layer = dnn_embedding
     for i, dim in enumerate(dnn_layer_dims[1:]):
         fc = layer.fc(input=_input_layer,
                       size=dim,
                       act=paddle.activation.Relu(),
                       name='dnn-fc-%d' % i)
         _input_layer = fc
     return _input_layer
Example #10
 def test_get_layer(self):
     pixel = layer.data(name='pixel2', type=data_type.dense_vector(784))
     label = layer.data(name='label2', type=data_type.integer_value(10))
     hidden = layer.fc(input=pixel,
                       size=100,
                       act=conf_helps.SigmoidActivation())
     inference = layer.fc(input=hidden,
                          size=10,
                          act=conf_helps.SoftmaxActivation())
     cost = layer.classification_cost(input=inference, label=label)
     topo = topology.Topology(cost)
     pixel_layer = topo.get_layer("pixel2")
     label_layer = topo.get_layer("label2")
     self.assertEqual(pixel_layer, pixel)
     self.assertEqual(label_layer, label)
Example #11
    def _attention(self, direct, cur_token, prev, to_apply, to_apply_proj):
        with layer.mixed(size=cur_token.size,
                         bias_attr=Attr.Param(direct + '.bp',
                             initial_std=0.),
                         act=Act.Linear()) as proj:
            proj += layer.full_matrix_projection(
                    input=cur_token,
                    param_attr=Attr.Param(direct + '.wp'))
            proj += layer.full_matrix_projection(
                    input=prev,
                    param_attr=Attr.Param(direct + '.wr'))

        expanded = layer.expand(input=proj, expand_as=to_apply)
        att_context = layer.addto(input=[expanded, to_apply_proj],
                                  act=Act.Tanh(),
                                  bias_attr=False)

        att_weights = layer.fc(input=att_context,
                               param_attr=Attr.Param(direct + '.w'),
                               bias_attr=Attr.Param(direct + '.b',
                                   initial_std=0.),
                               act=Act.SequenceSoftmax(),
                               size=1)
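        # att_weights holds one scalar per time step, normalized over the
        # sequence by SequenceSoftmax; scaling plus sum pooling below yields
        # the attention-weighted context vector.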
        scaled = layer.scaling(input=to_apply, weight=att_weights)
        applied = layer.pooling(input=scaled,
                                pooling_type=paddle.pooling.Sum())
        return applied
Example #12
    def _attention(self, direct, cur_token, prev, to_apply, to_apply_proj):
        with layer.mixed(size=cur_token.size,
                         bias_attr=Attr.Param(direct + '.bp', initial_std=0.),
                         act=Act.Linear()) as proj:
            proj += layer.full_matrix_projection(input=cur_token,
                                                 param_attr=Attr.Param(direct +
                                                                       '.wp'))
            proj += layer.full_matrix_projection(input=prev,
                                                 param_attr=Attr.Param(direct +
                                                                       '.wr'))

        expanded = layer.expand(input=proj, expand_as=to_apply)
        att_context = layer.addto(input=[expanded, to_apply_proj],
                                  act=Act.Tanh(),
                                  bias_attr=False)

        att_weights = layer.fc(input=att_context,
                               param_attr=Attr.Param(direct + '.w'),
                               bias_attr=Attr.Param(direct + '.b',
                                                    initial_std=0.),
                               act=Act.SequenceSoftmax(),
                               size=1)
        scaled = layer.scaling(input=to_apply, weight=att_weights)
        applied = layer.pooling(input=scaled,
                                pooling_type=paddle.pooling.Sum())
        return applied
Example #13
 def _build_dnn_submodel_(self, dnn_layer_dims):
     '''
     build DNN submodel.  # dnn_layer_dims = [128, 64, 32, 1]
     '''
     # dnn_merged_input is a data layer; this first fully connected layer is 128-dimensional.
     dnn_embedding = layer.fc(input=self.dnn_merged_input,
                              size=dnn_layer_dims[0])
     _input_layer = dnn_embedding
     for i, dim in enumerate(dnn_layer_dims[1:]):  #64, 32, 1
         fc = layer.fc(
             input=_input_layer,
             size=dim,
             act=paddle.activation.Relu(),  #ReLU activation
             name='dnn-fc-%d' % i)
         _input_layer = fc
     return _input_layer
Example #14
 def _build_lr_submodel_(self):
     '''
     config LR submodel
     '''
     fc = layer.fc(
         input=self.lr_merged_input, size=1, act=paddle.activation.Relu())
     return fc
Example #15
 def _build_regression_model(self, dnn, lr):
     merge_layer = layer.concat(input=[dnn, lr])
     self.output = layer.fc(
         input=merge_layer, size=1, act=paddle.activation.Sigmoid())
     if not self.is_infer:
         self.train_cost = paddle.layer.mse_cost(
             input=self.output, label=self.click)
     return self.output
Example #16
    def test_parse(self):
        pixel = layer.data(name='pixel3', type=data_type.dense_vector(784))
        label = layer.data(name='label3', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        maxid = layer.max_id(input=inference)
        cost1 = layer.classification_cost(input=inference, label=label)
        cost2 = layer.cross_entropy_cost(input=inference, label=label)

        topology.Topology(cost2).proto()
        topology.Topology([cost1]).proto()
        topology.Topology([cost1, cost2]).proto()
        topology.Topology([inference, maxid]).proto()
Example #17
 def new_step(y):
     mem = layer.memory(name="rnn_state", size=hidden_dim)
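     # With no boot_layer given, the memory defaults to a zero boot state
     # at the first time step.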
     out = layer.fc(input=[y, mem],
                    size=hidden_dim,
                    act=activation.Tanh(),
                    bias_attr=True,
                    name="rnn_state")
     return out
Example #18
 def step(y, wid):
     z = layer.embedding(input=wid, size=word_dim)
     mem = layer.memory(
         name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
     out = layer.fc(input=[y, z, mem],
                    size=hidden_dim,
                    act=activation.Tanh(),
                    bias_attr=True,
                    name="rnn_state")
     return out
Example #19
 def step(y, wid):
     z = layer.embedding(input=wid, size=word_dim)
     mem = layer.memory(name="rnn_state",
                        size=hidden_dim,
                        boot_layer=boot_layer)
     out = layer.fc(input=[y, z, mem],
                    size=hidden_dim,
                    act=activation.Tanh(),
                    bias_attr=True,
                    name="rnn_state")
     return out
Example #20
 def _step_basic(self, h_cur, u):
     expanded_h = layer.expand(input=h_cur, expand_as=u)
     hu = layer.concat(input=[expanded_h, u])
     with layer.mixed(bias_attr=False) as dot_hu:
         dot_hu += layer.dotmul_operator(a=expanded_h, b=u)
     cat_all = layer.concat(input=[hu, dot_hu])
     s = layer.fc(size=1,
                  bias_attr=False,
                  param_attr=Attr.Param(self.name + '.ws'),
                  input=cat_all)
     return s
Example #21
    def _build_classification_model(self, dnn, lr):
        merge_layer = layer.concat(input=[dnn, lr])
        self.output = layer.fc(
            input=merge_layer,
            size=1,
            # use the sigmoid function to approximate the CTR, a float value between 0 and 1
            act=paddle.activation.Sigmoid())

        if not self.is_infer:
            self.train_cost = paddle.layer.multi_binary_label_cross_entropy_cost(
                input=self.output, label=self.click)
        return self.output
Example #22
 def _combine_submodels_(self, dnn, lr):
     '''
     combine DNN and LR submodels
     '''
     merge_layer = layer.concat(input=[dnn, lr])
     fc = layer.fc(
         input=merge_layer,
         size=1,
         name='output',
         # use sigmoid function to approximate ctr rate, a float value between 0 and 1.
         act=paddle.activation.Sigmoid())
     return fc
Example #23
    def test_evaluator(self):
        img = layer.data(name='pixel2', type=data_type.dense_vector(784))
        output = layer.fc(input=img,
                          size=10,
                          act=activation.Softmax(),
                          name='fc_here')
        lbl = layer.data(name='label2', type=data_type.integer_value(10))
        cost = layer.cross_entropy_cost(input=output, label=lbl)

        evaluator.classification_error(input=output, label=lbl)
        print(layer.parse_network(cost))
        print(layer.parse_network(output))
Example #24
    def decode(self, name, input):
        """
        Implements the answer pointer part of the model.

        Args:
            name: name prefix of the layers defined in this method.
            input: the encoding of the paragraph.

        Returns:
            A probability distribution over temporal axis.
        """
        latent = layer.fc(size=input.size // 2,  # integer division; same result on Python 2 and 3
                          input=input,
                          act=Act.Tanh(),
                          bias_attr=False)
        probs = layer.fc(
                name=name,
                size=1,
                input=latent,
                act=Act.SequenceSoftmax())
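        # One logit per position; SequenceSoftmax normalizes across the whole
        # sequence, giving a distribution over time steps.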
        return probs
Example #25
    def _build_classification_model(self, dnn, lr):
        merge_layer = layer.concat(input=[dnn, lr])
        self.output = layer.fc(
            input=merge_layer,
            size=1,
            # use sigmoid function to approximate ctr rate, a float value between 0 and 1.
            act=paddle.activation.Sigmoid())

        if not self.is_infer:
            self.train_cost = paddle.layer.multi_binary_label_cross_entropy_cost(
                input=self.output, label=self.click)
        return self.output
Example #26
    def network(self):
        """
        Implements the detail of the model.
        """
        self.check_and_create_data()
        self.create_shared_params()
        q_enc = self.get_enc(self.q_ids, type='q')
        a_enc = self.get_enc(self.a_ids, type='q')

        q_proj_left = layer.fc(size=self.emb_dim * 2,
                               bias_attr=False,
                               param_attr=Attr.Param(self.name + '_left.wq'),
                               input=q_enc)
        q_proj_right = layer.fc(size=self.emb_dim * 2,
                                bias_attr=False,
                                param_attr=Attr.Param(self.name + '_right.wq'),
                                input=q_enc)
        left_match = self.recurrent_group(
            self.name + '_left',
            [layer.StaticInput(q_enc),
             layer.StaticInput(q_proj_left), a_enc],
            reverse=False)
        right_match = self.recurrent_group(
            self.name + '_right',
            [layer.StaticInput(q_enc),
             layer.StaticInput(q_proj_right), a_enc],
            reverse=True)
        match_seq = layer.concat(input=[left_match, right_match])
        with layer.mixed(size=match_seq.size,
                         act=Act.Identity(),
                         layer_attr=Attr.ExtraLayerAttribute(drop_rate=0.2),
                         bias_attr=False) as dropped:
            dropped += layer.identity_projection(match_seq)
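        # The identity projection plus drop_rate applies dropout to the
        # concatenated match sequence without changing its size.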
        match_result = layer.pooling(input=dropped,
                                     pooling_type=paddle.pooling.Max())
        cls = layer.fc(input=match_result,
                       act=Act.Softmax(),
                       size=self.label_dim)
        return cls
Example #27
    def network(self):
        """
        Implements the detail of the model.
        """
        self.check_and_create_data()
        self.create_shared_params()
        q_enc = self.get_enc(self.q_ids, type='q')
        a_enc = self.get_enc(self.a_ids, type='q')

        q_proj_left = layer.fc(size=self.emb_dim * 2,
                bias_attr=False,
                param_attr=Attr.Param(self.name + '_left.wq'),
                input=q_enc)
        q_proj_right = layer.fc(size=self.emb_dim * 2,
                bias_attr=False,
                param_attr=Attr.Param(self.name + '_right.wq'),
                input=q_enc)
        left_match = self.recurrent_group(self.name + '_left',
                [layer.StaticInput(q_enc),
                    layer.StaticInput(q_proj_left), a_enc],
                reverse=False)
        right_match = self.recurrent_group(self.name + '_right',
                [layer.StaticInput(q_enc),
                    layer.StaticInput(q_proj_right), a_enc],
                reverse=True)
        match_seq = layer.concat(input=[left_match, right_match])
        with layer.mixed(size=match_seq.size,
                act=Act.Identity(),
                layer_attr=Attr.ExtraLayerAttribute(drop_rate=0.2),
                bias_attr=False) as dropped:
            dropped += layer.identity_projection(match_seq)
        match_result = layer.pooling(input=dropped,
                pooling_type=paddle.pooling.Max())
        cls = layer.fc(input=match_result,
                act=Act.Softmax(),
                size=self.label_dim)
        return cls
Example #28
    def network(self, question, evidence, qe_comm, ee_comm, conf):
        """
        Implements the whole network of Match-LSTM.

        Returns:
            A tuple of LayerOutput objects containing the start and end
            probability distributions respectively.
        """

        q_enc = self.get_enc(question, conf, type='q')
        p_enc = self.get_enc(evidence, conf, type='q')

        q_proj_left = layer.fc(size=conf.word_vec_dim * 2,
                               bias_attr=False,
                               param_attr=Attr.Param(self.name + '_left_' +
                                                     '.wq'),
                               input=q_enc)
        q_proj_right = layer.fc(size=conf.word_vec_dim * 2,
                                bias_attr=False,
                                param_attr=Attr.Param(self.name + '_right_' +
                                                      '.wq'),
                                input=q_enc)
        # StaticInput defines a read-only memory: inputs given as StaticInput are
        # not split up by recurrent_group, so every unrolled time step can access
        # the whole input, which may be a non-sequence or a single-level sequence.
        left_out = self.recurrent_group(self.name + '_left', [
            layer.StaticInput(q_enc),
            layer.StaticInput(q_proj_left), p_enc, qe_comm, ee_comm
        ],
                                        reverse=False)
        right_out = self.recurrent_group(self.name + '_right_', [
            layer.StaticInput(q_enc),
            layer.StaticInput(q_proj_right), p_enc, qe_comm, ee_comm
        ],
                                         reverse=True)
        match_seq = layer.concat(input=[left_out, right_out])
        return self.drop_out(match_seq, drop_rate=0.5)
Example #29
    def test_data_type(self):
        pixel = layer.data(name='pixel', type=data_type.dense_vector(784))
        label = layer.data(name='label', type=data_type.integer_value(10))
        hidden = layer.fc(input=pixel,
                          size=100,
                          act=conf_helps.SigmoidActivation())
        inference = layer.fc(input=hidden,
                             size=10,
                             act=conf_helps.SoftmaxActivation())
        cost = layer.classification_cost(input=inference, label=label)
        topo = topology.Topology(cost)
        data_types = topo.data_type()
        self.assertEqual(len(data_types), 2)
        pixel_data_type = list(filter(lambda t: t[0] == "pixel", data_types))
        self.assertEqual(len(pixel_data_type), 1)
        pixel_data_type = pixel_data_type[0]
        self.assertEqual(pixel_data_type[1].type, pydp2.DataType.Dense)
        self.assertEqual(pixel_data_type[1].dim, 784)

        label_data_type = list(filter(lambda t: t[0] == "label", data_types))
        self.assertEqual(len(label_data_type), 1)
        label_data_type = label_data_type[0]
        self.assertEqual(label_data_type[1].type, pydp2.DataType.Index)
        self.assertEqual(label_data_type[1].dim, 10)
Example #30
    def __build_nn__(self):
        '''
        Build the network topology.
        '''
        # Get the image features with CNN.
        conv_features = self.conv_groups(self.image, conf.filter_num,
                                         conf.with_bn)

        # Expand the output of CNN into a sequence of feature vectors.
        sliced_feature = layer.block_expand(
            input=conv_features,
            num_channels=conf.num_channels,
            stride_x=conf.stride_x,
            stride_y=conf.stride_y,
            block_x=conf.block_x,
            block_y=conf.block_y)

        # Use RNN to capture sequence information forwards and backwards.
        gru_forward = simple_gru(
            input=sliced_feature, size=conf.hidden_size, act=Relu())
        gru_backward = simple_gru(
            input=sliced_feature,
            size=conf.hidden_size,
            act=Relu(),
            reverse=True)

        # Map the output of RNN to character distribution.
        self.output = layer.fc(input=[gru_forward, gru_backward],
                               size=self.num_classes + 1,
                               act=Linear())
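        # The extra (+1) class is reserved for the CTC blank label
        # (blank=self.num_classes below).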

        self.log_probs = paddle.layer.mixed(
            input=paddle.layer.identity_projection(input=self.output),
            act=paddle.activation.Softmax())

        # Use warp CTC to calculate cost for a CTC task.
        if not self.is_infer:
            self.cost = layer.warp_ctc(
                input=self.output,
                label=self.label,
                size=self.num_classes + 1,
                norm_by_times=conf.norm_by_times,
                blank=self.num_classes)

            self.eval = evaluator.ctc_error(input=self.output, label=self.label)
Example #31
    def __build_nn__(self):
        '''
        Build the network topology.
        '''
        # Get the image features with a CNN.
        conv_features = self.conv_groups(self.image, conf.filter_num,
                                         conf.with_bn)

        # Expand the output of the CNN into a sequence of feature vectors.
        sliced_feature = layer.block_expand(
            input=conv_features,
            num_channels=conf.num_channels,
            stride_x=conf.stride_x,
            stride_y=conf.stride_y,
            block_x=conf.block_x,
            block_y=conf.block_y)

        # Use RNNs to capture sequence information forwards and backwards.
        gru_forward = simple_gru(
            input=sliced_feature, size=conf.hidden_size, act=Relu())
        gru_backward = simple_gru(
            input=sliced_feature,
            size=conf.hidden_size,
            act=Relu(),
            reverse=True)

        # Map the output of the RNNs to a character distribution.
        self.output = layer.fc(input=[gru_forward, gru_backward],
                               size=self.num_classes + 1,
                               act=Linear())

        self.log_probs = paddle.layer.mixed(
            input=paddle.layer.identity_projection(input=self.output),
            act=paddle.activation.Softmax())

        # Use warp CTC to compute the cost for the CTC task.
        if not self.is_infer:
            self.cost = layer.warp_ctc(
                input=self.output,
                label=self.label,
                size=self.num_classes + 1,
                norm_by_times=conf.norm_by_times,
                blank=self.num_classes)

            self.eval = evaluator.ctc_error(input=self.output, label=self.label)
Example #32
    def test_initializer(self):
        def initializer(name):
            assert name == "fc.w"
            mat = numpy.ones((3, 2), dtype=numpy.float32)
            mat[1, 1] = 2
            return mat

        x = layer.data(name="x", type=data_type.dense_vector(3))
        y = layer.fc(x,
                     size=2,
                     bias_attr=False,
                     param_attr=ParamAttr(
                         name="fc.w", initializer=initializer))
        params = parameters.create(y)
        val = params["fc.w"]
        assert val.shape == (3, 2)
        expected = numpy.array([[1, 1], [1, 2], [1, 1]], numpy.float32)
        assert numpy.logical_and.reduce(numpy.reshape(val == expected, 6))
Example #33
import unittest

import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
import paddle.v2.networks as networks

pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(1))
combine_weight = layer.data(name='weight_combine',
                            type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

hidden = layer.fc(input=pixel,
                  size=100,
                  act=activation.Sigmoid(),
                  param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
conv = layer.img_conv(input=pixel,
                      filter_size=1,
                      filter_size_y=1,
                      num_channels=8,
                      num_filters=16,
                      act=activation.Linear())


class ImageLayerTest(unittest.TestCase):
    def test_conv_layer(self):
        conv_shift = layer.conv_shift(a=pixel, b=score)
        print(layer.parse_network(conv, conv_shift))
Example #34
import unittest

import paddle.v2.activation as activation
import paddle.v2.attr as attr
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer
import paddle.v2.pooling as pooling
import paddle.v2.networks as networks
import paddle.v2.evaluator as evaluator

pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
label = layer.data(name='label', type=data_type.integer_value(10))
weight = layer.data(name='weight', type=data_type.dense_vector(1))
combine_weight = layer.data(
    name='weight_combine', type=data_type.dense_vector(10))
score = layer.data(name='score', type=data_type.dense_vector(1))

hidden = layer.fc(input=pixel,
                  size=100,
                  act=activation.Sigmoid(),
                  param_attr=attr.Param(name='hidden'))
inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
conv = layer.img_conv(
    input=pixel,
    filter_size=1,
    filter_size_y=1,
    num_channels=8,
    num_filters=16,
    act=activation.Linear())


class ImageLayerTest(unittest.TestCase):
    def test_conv_layer(self):
        conv_shift = layer.conv_shift(a=pixel, b=score)
        print(layer.parse_network(conv, conv_shift))
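Note: the examples above are excerpts and rely on surrounding test scaffolding (class definitions, module-level imports, and dimension variables). As a minimal self-contained sketch of the same layer.fc API (assuming a working paddle.v2 installation; the paddle.init arguments here are illustrative):

import paddle.v2 as paddle
import paddle.v2.activation as activation
import paddle.v2.data_type as data_type
import paddle.v2.layer as layer

paddle.init(use_gpu=False, trainer_count=1)

# A 784-dimensional dense input feeding a 10-way softmax classifier.
x = layer.data(name='x', type=data_type.dense_vector(784))
y = layer.fc(input=x, size=10, act=activation.Softmax())

# parse_network compiles the topology into its protobuf description.
print(layer.parse_network(y))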