Code Example #1
    def check_adaptive_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[2, 3, 32], dtype="float32")
            result = F.adaptive_max_pool1d(input, output_size=16)

            input_np = np.random.random([2, 3, 32]).astype("float32")
            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))
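For reference, the same static-graph check can be written as a self-contained script. The sketch below is a hedged rewrite that assumes Paddle 2.x and its `paddle.static` entry points instead of `fluid`; the program names and the CPU place are illustrative choices, and because the test helper `max_pool1D_forward_naive` is not available outside the test file, the sketch only checks the output shape.

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="input", shape=[2, 3, 32], dtype="float32")
    out = F.adaptive_max_pool1d(x, output_size=16)

exe = paddle.static.Executor(paddle.CPUPlace())
input_np = np.random.random([2, 3, 32]).astype("float32")
# output_size=16 halves the last axis: (2, 3, 32) -> (2, 3, 16)
fetched = exe.run(main_prog, feed={"input": input_np}, fetch_list=[out])
print(fetched[0].shape)  # (2, 3, 16)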
Code Example #2
    def check_adaptive_max_dygraph_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = F.adaptive_max_pool1d(input, output_size=16)

            result_np = max_pool1D_forward_naive(
                input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))

            ada_max_pool1d_dg = paddle.nn.layer.AdaptiveMaxPool1D(
                output_size=16)
            result = ada_max_pool1d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
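The dygraph check above pairs the functional call with the layer class. Below is a minimal standalone sketch of the same pairing using the Paddle 2.x entry points (`paddle.to_tensor`, `paddle.nn.AdaptiveMaxPool1D`) rather than the `fluid.dygraph` guard; it assumes Paddle >= 2.0, where dynamic graph mode is the default, and only checks that the two forms agree.

import numpy as np
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor(np.random.random([2, 3, 32]).astype("float32"))

# Functional form: each of the 16 output positions takes the max over its
# adaptively sized window along the last axis.
y_func = F.adaptive_max_pool1d(x, output_size=16)

# Layer form: the same operation packaged as a reusable network component.
pool = paddle.nn.AdaptiveMaxPool1D(output_size=16)
y_layer = pool(x)

print(y_func.shape, y_layer.shape)         # [2, 3, 16] [2, 3, 16]
print(bool(paddle.allclose(y_func, y_layer)))  # True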
Code Example #3
    def forward(self, inputs, mask=None):
        r"""
        Applies the combination of multiple convolution layers and max pooling layers to the input sequence.

        Args:
            inputs (Tensor): 
                Shape as `(batch_size, num_tokens, emb_dim)` and dtype as `float32` or `float64`.
                Tensor containing the features of the input sequence. 
            mask (Tensor, optional): 
                Shape should be the same as `inputs` and dtype should be `int32`, `int64`, `float32` or `float64`. 
                Each of its elements indicates whether the corresponding input token is a padding token. 
                If True, the token is not padding; if False, it is padding. 
                Defaults to `None`.

        Returns:
            Tensor: 
                The result dtype is `float`. If output_dim is None, the result shape is 
                `(batch_size, len(ngram_filter_sizes) * num_filter)`; 
                otherwise, the result shape is `(batch_size, output_dim)`.

        """
        if mask is not None:
            inputs = inputs * mask

        # Shape: (batch_size, 1, num_tokens, emb_dim) = (N, C, H, W)
        inputs = inputs.unsqueeze(1)

        # If output_dim is None, the result shape is (batch_size, len(ngram_filter_sizes) * num_filter);
        # else, the result shape is (batch_size, output_dim).
        convs_out = [
            self._activation(conv(inputs)).squeeze(3) for conv in self.convs
        ]
        maxpool_out = [
            F.adaptive_max_pool1d(t, output_size=1).squeeze(2)
            for t in convs_out
        ]
        result = paddle.concat(maxpool_out, axis=1)

        if self.projection_layer is not None:
            result = self.projection_layer(result)
        return result
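The core of this forward pass is the conv-then-adaptive-max-pool pipeline. The sketch below isolates just that pipeline; the sizes (`batch_size`, `num_filter`, `ngram_filter_sizes`) are made up for illustration, the convolutions are built inline rather than taken from the encoder's constructor, and `paddle.nn.Conv2D` from Paddle 2.x is assumed.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

batch_size, num_tokens, emb_dim, num_filter = 4, 20, 8, 16
ngram_filter_sizes = (2, 3)  # hypothetical n-gram widths

inputs = paddle.randn([batch_size, num_tokens, emb_dim])
convs = [
    nn.Conv2D(in_channels=1, out_channels=num_filter,
              kernel_size=(ngram, emb_dim))
    for ngram in ngram_filter_sizes
]

# (batch_size, num_tokens, emb_dim) -> (batch_size, 1, num_tokens, emb_dim)
x = inputs.unsqueeze(1)
# Each conv collapses emb_dim, leaving (batch_size, num_filter, num_tokens - ngram + 1)
convs_out = [paddle.tanh(conv(x)).squeeze(3) for conv in convs]
# Adaptive max pooling to a single position per filter, then drop that axis
maxpool_out = [
    F.adaptive_max_pool1d(t, output_size=1).squeeze(2) for t in convs_out
]
result = paddle.concat(maxpool_out, axis=1)
print(result.shape)  # [4, 32] == [batch_size, len(ngram_filter_sizes) * num_filter]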
Code Example #4
    def forward(self, inputs, mask=None):
        """
        Applies the combination of multiple convolution layers and max pooling layers to the input sequence.

        Args:
            inputs (paddle.Tensor): Shape as `(batch_size, num_tokens, emb_dim)`.
            mask (obj: `paddle.Tensor`, optional, defaults to `None`): Shape should be the same as `inputs`. 
                Each of its elements indicates whether the corresponding token is a padding token. 
                If True, the token is not padding; if False, it is padding.

        Returns:
            result (paddle.Tensor): If output_dim is None, the result shape 
                is `(batch_size, len(ngram_filter_sizes) * num_filter)`; otherwise, 
                the result shape is `(batch_size, output_dim)`.

        """
        if mask is not None:
            inputs = inputs * mask

        # Shape: (batch_size, 1, num_tokens, emb_dim) = (N, C, H, W)
        inputs = inputs.unsqueeze(1)

        # If output_dim is None, the result shape is (batch_size, len(ngram_filter_sizes) * num_filter);
        # else, the result shape is (batch_size, output_dim).
        convs_out = [
            self._activation(conv(inputs)).squeeze(3) for conv in self.convs
        ]
        maxpool_out = [
            F.adaptive_max_pool1d(
                t, output_size=1).squeeze(2) for t in convs_out
        ]
        result = paddle.concat(maxpool_out, axis=1)

        if self.projection_layer is not None:
            result = self.projection_layer(result)
        return result
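As a complement, the mask handling at the top of this forward is a plain elementwise multiplication, so padding embeddings become zeros before the convolutions run. The tiny sketch below illustrates that step with made-up shapes; expanding a per-token mask to the shape of `inputs` is an assumption to satisfy the docstring's "same shape as inputs" requirement, not code taken from the library.

import paddle

inputs = paddle.randn([2, 5, 4])  # (batch_size, num_tokens, emb_dim)
token_mask = paddle.to_tensor(
    [[1, 1, 1, 0, 0],
     [1, 1, 0, 0, 0]], dtype="float32")  # 1 = real token, 0 = padding

# Broadcast the per-token flags across the embedding dimension so the mask
# has the same shape as `inputs`, as the docstring requires.
mask = token_mask.unsqueeze(-1).expand([2, 5, 4])
masked_inputs = inputs * mask  # padding positions are zeroed out
print(masked_inputs.shape)  # [2, 5, 4]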