Example #1
    def forward(self, pinyin_ids):
        """
        Args:
            pinyin_ids (Tensor): Its shape is (bs, sentence_length, pinyin_locs).

        Returns:
            pinyin_embed (Tensor): Its shape is (bs,sentence_length,pinyin_out_dim).

        """
        # input pinyin ids for 1-D conv
        embed = self.embedding(
            pinyin_ids)  # [bs,sentence_length,pinyin_locs,embed_size]
        bs, sentence_length, pinyin_locs, embed_size = embed.shape
        view_embed = embed.reshape(shape=[
            -1, pinyin_locs, embed_size
        ])  # [(bs*sentence_length),pinyin_locs,embed_size]
        input_embed = view_embed.transpose(
            [0, 2, 1])  # [(bs*sentence_length), embed_size, pinyin_locs]
        # conv + max_pooling
        pinyin_conv = self.conv(
            input_embed)  # [(bs*sentence_length),pinyin_out_dim,H]
        pinyin_embed = F.max_pool1d(
            pinyin_conv,
            pinyin_conv.shape[-1])  # [(bs*sentence_length),pinyin_out_dim,1]
        return pinyin_embed.reshape(
            shape=[bs, sentence_length,
                   self.pinyin_out_dim])  # [bs,sentence_length,pinyin_out_dim]
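This forward relies on an embedding table, a 1-D convolution and a pinyin_out_dim attribute that are created elsewhere. A minimal constructor sketch that would make the method runnable (the class name and all sizes are assumptions):

import paddle.nn as nn

class PinyinEmbedding(nn.Layer):
    def __init__(self, pinyin_vocab_size=32, embed_size=128, pinyin_out_dim=768):
        super().__init__()
        self.pinyin_out_dim = pinyin_out_dim
        # per-pinyin-character embedding looked up in forward()
        self.embedding = nn.Embedding(pinyin_vocab_size, embed_size)
        # 1-D convolution over the pinyin_locs axis; input channels = embed_size
        self.conv = nn.Conv1D(
            in_channels=embed_size,
            out_channels=pinyin_out_dim,
            kernel_size=2,
            stride=1)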
Example #2
 def run3():
     with fluid.dygraph.guard():
         input_np = np.random.uniform(-1, 1,
                                      [2, 3, 32]).astype(np.float32)
         input_pd = fluid.dygraph.to_variable(input_np)
         padding = "padding"
         res_pd = F.max_pool1d(
             input_pd, kernel_size=2, stride=2, padding=padding)
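Since "padding" is not a valid padding value, run3 is presumably invoked through an exception check in the enclosing test case, roughly as follows (the exact exception type is an assumption):

# somewhere in the enclosing test method, presumably:
self.assertRaises(ValueError, run3)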
Example #3
    def check_max_dygraph_padding_same(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.max_pool1d(
                input, kernel_size=2, stride=2, padding="SAME")

            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])

            self.assertTrue(np.allclose(result.numpy(), result_np))
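max_pool1D_forward_naive is a NumPy reference helper defined elsewhere in the test file. A minimal sketch consistent with how it is called here (the exact padding handling is an assumption):

import numpy as np

def max_pool1D_forward_naive(x, ksize, strides, paddings):
    # x has layout (N, C, L); ksize/strides/paddings are one-element lists
    N, C, L = x.shape
    k, s, p = ksize[0], strides[0], paddings[0]
    L_out = (L + 2 * p - k) // s + 1
    out = np.zeros((N, C, L_out), dtype=x.dtype)
    for i in range(L_out):
        start = max(i * s - p, 0)
        end = min(i * s - p + k, L)
        out[:, :, i] = x[:, :, start:end].max(axis=-1)
    return out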
Example #4
    def check_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.max_pool1d(input, kernel_size=2, stride=2, padding=[0])

            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))
Example #5
    def check_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.max_pool1d(input, kernel_size=2, stride=2, padding=0)

            result_np = max_pool1D_forward_naive(
                input_np, ksize=[2], strides=[2], paddings=[0])

            self.assertTrue(np.allclose(result.numpy(), result_np))

            max_pool1d_dg = paddle.nn.layer.MaxPool1D(
                kernel_size=2, stride=None, padding=0)
            result = max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
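The second assertion passes because paddle.nn.MaxPool1D treats stride=None as "use kernel_size as the stride", so the layer computes the same result as the functional call with stride=2.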
Example #6
    def forward(self, inputs):
        emb = self.embedding(inputs)
        emb = emb.unsqueeze(1)
        # convolution layer
        convs_out = [
            self.conv_layer_activation(conv(emb)).squeeze(3)
            for conv in self.convs
        ]
        # pool layer
        maxpool_out = [
            F.max_pool1d(t, kernel_size=t.shape[2]).squeeze(2)
            for t in convs_out
        ]

        conv_pool_out = paddle.concat(maxpool_out, axis=1)
        conv_pool_out = self.projection_layer(conv_pool_out)

        act_out = paddle.tanh(conv_pool_out)

        logits = self.output_layer(act_out)
        return logits
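This forward follows the usual TextCNN pattern (embedding, several Conv2D filters over n-grams, max pooling over time, projection, output layer). A minimal constructor sketch under those assumptions (all sizes, and names other than the ones used above, are made up):

import paddle.nn as nn

class TextCNN(nn.Layer):
    def __init__(self, vocab_size, emb_dim=128, num_filter=128,
                 filter_sizes=(3, 4, 5), hidden_size=96, num_classes=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        # one Conv2D per n-gram size; the dummy channel dim is added by unsqueeze(1)
        self.convs = nn.LayerList(
            [nn.Conv2D(1, num_filter, (fs, emb_dim)) for fs in filter_sizes])
        self.conv_layer_activation = nn.Tanh()
        self.projection_layer = nn.Linear(num_filter * len(filter_sizes), hidden_size)
        self.output_layer = nn.Linear(hidden_size, num_classes)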
Example #7
    def test_case(self):
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            paddle.disable_static()
            input_data = np.random.rand(1, 3, 16)
            input_x = paddle.to_tensor(input_data)
            output, indices = F.max_pool1d(input_x,
                                           kernel_size=2,
                                           stride=2,
                                           return_mask=True)
            output_unpool = F.max_unpool1d(output,
                                           indices,
                                           kernel_size=2,
                                           stride=None)
            expected_output_unpool = unpool1dmax_forward_naive(
                output.numpy(), indices.numpy(), [2], [2], [0], [16])
            self.assertTrue(
                np.allclose(output_unpool.numpy(), expected_output_unpool))

        paddle.enable_static()
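unpool1dmax_forward_naive is another NumPy reference helper from the test file; it scatters each pooled value back to its recorded argmax position in a zero tensor of the requested output length. A minimal sketch matching the call above (signature and behavior are assumptions):

import numpy as np

def unpool1dmax_forward_naive(pooled, indices, ksize, strides, paddings, output_size):
    # pooled/indices have layout (N, C, L_pool); indices hold positions along the
    # original length axis, as returned by F.max_pool1d(..., return_mask=True)
    N, C, L_pool = pooled.shape
    out = np.zeros((N, C, output_size[0]), dtype=pooled.dtype)
    for n in range(N):
        for c in range(C):
            for i in range(L_pool):
                out[n, c, indices[n, c, i]] = pooled[n, c, i]
    return out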
Example #8
File: encoder.py Project: wbj0110/models
    def forward(self, inputs, mask=None):
        """
        The combination of multiple convolution layers and max pooling layers.

        Args:
            inputs (obj: `paddle.Tensor`, required): Shape as `(batch_size, num_tokens, emb_dim)`
            mask (obj: `paddle.Tensor`, optional, defaults to `None`): Shape same as `inputs`.
                Each element indicates whether the corresponding token is a padding token:
                True means not a padding token, False means a padding token.

        Returns:
            result (obj: `paddle.Tensor`): If output_dim is None, the result shape
                is `(batch_size, len(ngram_filter_sizes) * num_filter)`; otherwise it
                is `(batch_size, output_dim)`.

        """
        if mask is not None:
            inputs = inputs * mask

        # Shape: (batch_size, 1, num_tokens, emb_dim) = (N, C, H, W)
        inputs = inputs.unsqueeze(1)

        # If output_dim is None, result shape is (batch_size, len(ngram_filter_sizes) * num_filter);
        # else, result shape is (batch_size, output_dim).
        convs_out = [
            self._activation(conv(inputs)).squeeze(3) for conv in self.convs
        ]
        maxpool_out = [
            F.max_pool1d(t, kernel_size=t.shape[2]).squeeze(2)
            for t in convs_out
        ]
        result = paddle.concat(maxpool_out, axis=1)

        if self.projection_layer is not None:
            result = self.projection_layer(result)
        return result
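The forward above depends on self.convs, self._activation and self.projection_layer being created in the constructor. A minimal sketch consistent with the docstring (argument names and defaults are assumptions):

import paddle.nn as nn

class CNNEncoder(nn.Layer):
    def __init__(self, emb_dim, num_filter, ngram_filter_sizes=(2, 3, 4, 5),
                 output_dim=None):
        super().__init__()
        self._activation = nn.Tanh()
        # one Conv2D per n-gram size, applied to the (N, 1, num_tokens, emb_dim) input
        self.convs = nn.LayerList(
            [nn.Conv2D(1, num_filter, (size, emb_dim))
             for size in ngram_filter_sizes])
        # optional projection from len(ngram_filter_sizes) * num_filter to output_dim
        self.projection_layer = (
            nn.Linear(len(ngram_filter_sizes) * num_filter, output_dim)
            if output_dim is not None else None)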
Example #9
    def test_case(self):
        paddle.enable_static()
        places = [paddle.CPUPlace()]
        if paddle.fluid.core.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        for place in places:
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):

                input_data = np.array([[[1, 2, 3, 4], [5, 6, 7, 8],
                                        [9, 10, 11, 12]]]).astype("float32")
                x = paddle.fluid.data(name='x',
                                      shape=[1, 3, 4],
                                      dtype='float32')
                output, indices = F.max_pool1d(x,
                                               kernel_size=2,
                                               stride=2,
                                               return_mask=True)
                output_unpool = F.max_unpool1d(output,
                                               indices,
                                               kernel_size=2,
                                               stride=None)

                exe = paddle.fluid.Executor(place)
                fetches = exe.run(paddle.fluid.default_main_program(),
                                  feed={"x": input_data},
                                  fetch_list=[output_unpool],
                                  return_numpy=True)
                pool1d_out_np = np.array(
                    [[[2., 4.], [6., 8.], [10., 12.]]]).astype("float32")
                indices_np = np.array(
                    [[[1, 3], [1, 3], [1, 3]]]).astype("int32")
                expected_output_unpool = unpool1dmax_forward_naive(
                    pool1d_out_np, indices_np, [2], [2], [0], [4])
                self.assertTrue(np.allclose(fetches[0],
                                            expected_output_unpool))
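The hard-coded pool1d_out_np and indices_np simply restate what max pooling does to each row of input_data; a quick NumPy check for the first row (for illustration only):

import numpy as np

row = np.array([1., 2., 3., 4.])
windows = row.reshape(2, 2)                            # kernel_size=2, stride=2
pooled = windows.max(axis=1)                           # [2., 4.]
indices = windows.argmax(axis=1) + np.arange(2) * 2    # positions [1, 3] in the row
unpooled = np.zeros(4)
unpooled[indices] = pooled                             # [0., 2., 0., 4.]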