Example #1
        def parse_new_rnn():
            reset_parser()
            data = layer.data(
                name="word", type=data_type.dense_vector(dict_dim))
            label = layer.data(
                name="label", type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(
                name="boot", type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(
                    name="rnn_state", size=hidden_dim, boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(
                name="rnn", step=step, input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #2
 def _attention_flow(self, h, u):
     # run self._h_step over h, with u passed as a static (non-sequence) input
     bs = layer.recurrent_group(input=[h, layer.StaticInput(u)],
                                step=self._h_step,
                                reverse=False)
     # a sequence-level softmax turns the step outputs into attention weights
     b_weights = layer.mixed(act=Act.SequenceSoftmax(),
                             bias_attr=False,
                             input=layer.identity_projection(bs))
     # weight each step of h and sum-pool the sequence into a single vector
     h_step_scaled = layer.scaling(input=h, weight=b_weights)
     h_step = layer.pooling(input=h_step_scaled,
                            pooling_type=paddle.pooling.Sum())
     # repeat the pooled vector so it again has the same length as h
     h_expr = layer.expand(input=h_step, expand_as=h)
     # run self._u_step over h, again with u as a static input
     u_expr = layer.recurrent_group(input=[h, layer.StaticInput(u)],
                                    step=self._u_step,
                                    reverse=False)
     # combine h with the two attended representations
     g = self._beta(h, u_expr, h_expr)
     return g
Example #3
        def parse_new_rnn():
            data = layer.data(name="word",
                              type=data_type.dense_vector(dict_dim))
            label = layer.data(name="label",
                               type=data_type.dense_vector(label_dim))
            emb = layer.embedding(input=data, size=word_dim)
            boot_layer = layer.data(name="boot",
                                    type=data_type.dense_vector(10))
            boot_layer = layer.fc(name='boot_fc', input=boot_layer, size=10)

            def step(y, wid):
                z = layer.embedding(input=wid, size=word_dim)
                mem = layer.memory(name="rnn_state",
                                   size=hidden_dim,
                                   boot_layer=boot_layer)
                out = layer.fc(input=[y, z, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            out = layer.recurrent_group(name="rnn",
                                        step=step,
                                        input=[emb, data])

            rep = layer.last_seq(input=out)
            prob = layer.fc(size=label_dim,
                            input=rep,
                            act=activation.Softmax(),
                            bias_attr=True)

            cost = layer.classification_cost(input=prob, label=label)

            return str(layer.parse_network(cost))
Example #4
        def parse_new_rnn():
            def new_step(y):
                mem = layer.memory(name="rnn_state", size=hidden_dim)
                out = layer.fc(input=[y, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            data = layer.data(
                name="word", type=data_type.integer_value(dict_dim))
            embd = layer.embedding(input=data, size=word_dim)
            rnn_layer = layer.recurrent_group(
                name="rnn", step=new_step, input=embd)
            return str(layer.parse_network(rnn_layer))
Example #5
        def parse_new_rnn():
            def new_step(y):
                mem = layer.memory(name="rnn_state", size=hidden_dim)
                out = layer.fc(input=[y, mem],
                               size=hidden_dim,
                               act=activation.Tanh(),
                               bias_attr=True,
                               name="rnn_state")
                return out

            data = layer.data(name="word",
                              type=data_type.integer_value(dict_dim))
            embd = layer.embedding(input=data, size=word_dim)
            rnn_layer = layer.recurrent_group(name="rnn",
                                              step=new_step,
                                              input=embd)
            return str(layer.parse_network(rnn_layer))
Example #6
    def recurrent_group(self, name, inputs, reverse=False):
        """
        Implements the Match-LSTM layer in the paper.

        Args:
            name: the name prefix of the layers created by this method.
            inputs: the inputs taken by the _step method.
            reverse: True if the paragraph encoding is processed from right
                     to left; otherwise it is processed from left to right.
        Returns:
            The Match-LSTM layer's output of one direction.
        """
        inputs.insert(0, name)
        seq_out = layer.recurrent_group(name=name,
                                        input=inputs,
                                        step=self._step,
                                        reverse=reverse)
        return seq_out
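
The method above yields only one direction of the Match-LSTM, so a caller typically runs it twice and concatenates the two outputs. The sketch below is illustrative only: the names p_encoding and q_encoding, the exact contents of the inputs list (which depends on the signature of self._step), and the use of layer.concat are assumptions, not taken from the code above.

        # Hypothetical caller; variable names and the layer.concat call
        # are assumptions, not part of the original example.
        match_fwd = self.recurrent_group(
            'match_lstm_fwd', [layer.StaticInput(q_encoding), p_encoding],
            reverse=False)
        match_bwd = self.recurrent_group(
            'match_lstm_bwd', [layer.StaticInput(q_encoding), p_encoding],
            reverse=True)
        match_bidir = layer.concat(input=[match_fwd, match_bwd])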