Example No. 1
 def test_error(self):
     with paddle.static.program_guard(paddle.static.Program()):
         weight_fp32 = paddle.data(
             name='weight_fp32', shape=[1], dtype='float32')
         # The input type must be Variable.
         self.assertRaises(TypeError, F.prelu, x=1, weight=weight_fp32)
         # The input dtype must be float16, float32, float64.
         x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
         self.assertRaises(TypeError, F.prelu, x=x_int32, weight=weight_fp32)
         # The float16 input dtype is also supported.
         x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
         F.prelu(x=x_fp16, weight=weight_fp32)
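For contrast with the error cases above, a minimal sketch of a valid call in dynamic-graph mode; this assumes the Paddle 2.x `paddle.to_tensor` API, and the values are illustrative only.

    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_tensor([[-1.0, 2.0], [3.0, -4.0]], dtype='float32')
    w = paddle.to_tensor([0.25], dtype='float32')  # one slope shared by all channels
    y = F.prelu(x, w)  # negative entries are scaled by 0.25, positives pass through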
Example No. 2
    def test_attr_tensor_API(self):
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            fill_value = 2.0
            input = paddle.data(name='input', dtype='float32', shape=[2, 3])
            output = paddle.full_like(input, fill_value)
            output_dtype = paddle.full_like(input, fill_value, dtype='float32')

            place = paddle.CPUPlace()
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
            exe = paddle.static.Executor(place)
            exe.run(startup_program)

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)

            res = exe.run(train_program,
                          feed={'input': img},
                          fetch_list=[output])

            out_np = np.array(res[0])
            self.assertTrue(not (out_np - np.full_like(img, fill_value)).any(),
                            msg="full_like output is wrong, out = " +
                            str(out_np))
Example No. 3
    def test_api(self):
        shape = [3, 4]
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            x = paddle.data('X', shape)

            # 'bool', 'float32', 'float64', 'int32', 'int64'
            out1 = zeros_like(x)
            out2 = zeros_like(x, np.bool)
            out3 = zeros_like(x, 'float64')
            out4 = zeros_like(x, 'int32')
            out5 = zeros_like(x, 'int64')

        place = fluid.CUDAPlace(
            0) if core.is_compiled_with_cuda() else fluid.CPUPlace()
        exe = fluid.Executor(place)
        outs = exe.run(train_program,
                       feed={'X': np.ones(shape).astype('float32')},
                       fetch_list=[out1, out2, out3, out4, out5])

        for i, dtype in enumerate(
            [np.float32, np.bool, np.float64, np.int32, np.int64]):
            self.assertEqual(outs[i].dtype, dtype)
            self.assertEqual((outs[i] == np.zeros(shape, dtype)).all(), True)
Example No. 4
    def test_attr(self):
        x = paddle.data(name='x', shape=[10, 10], dtype='float64')

        def test_return_index():
            result = paddle.unique(x, return_index=0)

        self.assertRaises(TypeError, test_return_index)

        def test_return_inverse():
            result = paddle.unique(x, return_inverse='s')

        self.assertRaises(TypeError, test_return_inverse)

        def test_return_counts():
            result = paddle.unique(x, return_counts=3)

        self.assertRaises(TypeError, test_return_counts)

        def test_axis():
            result = paddle.unique(x, axis='12')

        def test_dtype():
            result = paddle.unique(x, dtype='float64')

        self.assertRaises(TypeError, test_axis)
        self.assertRaises(TypeError, test_dtype)
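For reference, a minimal sketch of a valid paddle.unique call in dynamic-graph mode; this assumes the `paddle.to_tensor` API, and the four return values follow the order implied by the flags used.

    import paddle

    paddle.disable_static()
    x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
    out, index, inverse, counts = paddle.unique(
        x, return_index=True, return_inverse=True, return_counts=True)
    # out: unique values; index: first position of each unique value in x;
    # inverse: mapping from x back into out; counts: occurrences of each value.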
Example No. 5
    def test_with_zero_state(self):
        mp = self.mp.clone()
        sp = self.sp
        rnn1 = self.rnn1
        rnn2 = self.rnn2
        exe = self.executor
        scope = self.scope

        x = np.random.randn(4, 16)

        y1, (h1, c1) = rnn1(x)

        with paddle.fluid.unique_name.guard():
            with paddle.static.program_guard(mp, sp):
                x_data = paddle.data(
                    "input", [-1, 16],
                    dtype=paddle.framework.get_default_dtype())
                y, (h, c) = rnn2(x_data)

        feed_dict = {x_data.name: x}

        with paddle.static.scope_guard(scope):
            y2, h2, c2 = exe.run(mp,
                                 feed=feed_dict,
                                 fetch_list=[y, h, c],
                                 use_prune=True)

        np.testing.assert_allclose(h1, h2, atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2, atol=1e-8, rtol=1e-5)
Example No. 6
 def static(self):
     with paddle.static.program_guard(paddle.static.Program()):
         x = paddle.data('X', self.shape, self.dtype)
         out = paddle.var(x, self.axis, self.unbiased, self.keepdim)
         exe = paddle.static.Executor(self.place)
         res = exe.run(feed={'X': self.x}, fetch_list=[out])
     return res[0]
Example No. 7
    def check_static_result_4(self, place):
        paddle.enable_static()
        with program_guard(Program(), Program()):
            input_shape = (2, 3, 4, 5, 6)
            pad = [1, 2, 1, 1, 3, 4]
            mode = "circular"
            input_data = np.random.rand(*input_shape).astype(np.float32)
            x = paddle.data(name="x", shape=input_shape)
            result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW")
            result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC")
            exe = Executor(place)
            fetches = exe.run(default_main_program(),
                              feed={"x": input_data},
                              fetch_list=[result1, result2])

            np_out1 = self._get_numpy_out(input_data,
                                          pad,
                                          mode,
                                          data_format="NCDHW")
            np_out2 = self._get_numpy_out(input_data,
                                          pad,
                                          mode,
                                          data_format="NDHWC")
            self.assertTrue(np.allclose(fetches[0], np_out1))
            self.assertTrue(np.allclose(fetches[1], np_out2))
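The reference helper `self._get_numpy_out` is not shown in this excerpt. A hedged NumPy sketch for the circular mode used here, assuming Paddle's 5-D pad ordering of [left, right, top, bottom, front, back], could look like this:

    import numpy as np

    def circular_pad_ref(data, pad, data_format="NCDHW"):
        # np.pad with mode='wrap' implements circular padding; only the three
        # spatial axes (D, H, W) are padded.
        if data_format == "NCDHW":
            pad_width = [(0, 0), (0, 0), (pad[4], pad[5]), (pad[2], pad[3]),
                         (pad[0], pad[1])]
        else:  # "NDHWC"
            pad_width = [(0, 0), (pad[4], pad[5]), (pad[2], pad[3]),
                         (pad[0], pad[1]), (0, 0)]
        return np.pad(data, pad_width, mode='wrap')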
Example No. 8
    def test_api(self):
        x_1 = paddle.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
        paddle.concat([x_1, x_1], 0)

        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        positive_1_int32 = paddle.fill_constant([1], "int32", 1)
        positive_1_int64 = paddle.fill_constant([1], "int64", 1)
        negative_int64 = paddle.fill_constant([1], "int64", -3)
        out_1 = paddle.concat(x=[x_2, x_3], axis=1)
        out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
        out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        [res_1, res_2, res_3,
         res_4] = exe.run(paddle.static.default_main_program(),
                          feed={
                              "x_1": input_2,
                              "x_2": input_2,
                              "x_3": input_3
                          },
                          fetch_list=[out_1, out_2, out_3, out_4])
        assert np.array_equal(res_1, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3),
                                                    axis=1))
        assert np.array_equal(res_4, np.concatenate((input_2, input_3),
                                                    axis=1))
Example No. 9
    def run_static(self, use_gpu=False):
        input = paddle.data(name='input', shape=[10, 10, 5], dtype='float32')
        result0 = paddle.prod(input)
        result1 = paddle.prod(input, axis=1)
        result2 = paddle.prod(input, axis=-1)
        result3 = paddle.prod(input, axis=[0, 1])
        result4 = paddle.prod(input, axis=1, keepdim=True)
        result5 = paddle.prod(input, axis=1, dtype='int64')
        result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(paddle.static.default_startup_program())
        static_result = exe.run(feed={"input": self.input},
                                fetch_list=[
                                    result0, result1, result2, result3, result4,
                                    result5, result6
                                ])

        expected_result = np.prod(self.input)
        self.assertTrue(np.allclose(static_result[0], expected_result))
        expected_result = np.prod(self.input, axis=1)
        self.assertTrue(np.allclose(static_result[1], expected_result))
        expected_result = np.prod(self.input, axis=-1)
        self.assertTrue(np.allclose(static_result[2], expected_result))
        expected_result = np.prod(self.input, axis=(0, 1))
        self.assertTrue(np.allclose(static_result[3], expected_result))
        expected_result = np.prod(self.input, axis=1, keepdims=True)
        self.assertTrue(np.allclose(static_result[4], expected_result))
        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[5], expected_result))
        expected_result = np.prod(
            self.input, axis=1, keepdims=True, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[6], expected_result))
Example No. 10
    def test_error(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            x = paddle.data(name='x', shape=[2, 2, 4], dtype='float32')
            bool_x = paddle.data(name='bool_x', shape=[2, 2, 4], dtype='bool')
            # The argument x should be a Tensor.
            self.assertRaises(TypeError, paddle.prod, [1])

            # The data type of x should be float32, float64, int32, int64
            self.assertRaises(TypeError, paddle.prod, bool_x)

            # The argument axis's type should be int, list or tuple.
            self.assertRaises(TypeError, paddle.prod, x, 1.5)

            # The argument dtype of prod_op should be float32, float64, int32 or int64.
            self.assertRaises(TypeError, paddle.prod, x, 'bool')
Example No. 11
        def test_x_dtype():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                x = paddle.data(name='x', shape=[10, 10], dtype='float16')
                result = paddle.unique(x)

        self.assertRaises(TypeError, test_x_dtype)
Example No. 12
 def test_reflect_3():
     input_shape = (1, 2, 3, 4, 5)
     data = np.random.rand(*input_shape).astype(np.float32)
     x = paddle.data(name="x", shape=input_shape)
     y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect')
     place = paddle.CPUPlace()
     exe = Executor(place)
     outputs = exe.run(feed={'x': data}, fetch_list=[y.name])
Example No. 13
 def test_type():
     # dtype must be float32, float64, int8, int32, int64.
     x2 = np.arange(image_shape[0] * image_shape[1] * image_shape[2] *
                    image_shape[3]).reshape(image_shape) / 100.
     x2 = x2.astype('float16')
     x2_var = paddle.data(name='x2',
                          shape=[3, 2, 4, 5],
                          dtype='float16')
     paddle.flatten(x2_var)
Example No. 14
    def test_attr(self):
        x = paddle.data(name='x', shape=[10, 10], dtype='float64')
        y = paddle.data(name='y', shape=[10, 10], dtype='float64')

        def test_rtol():
            result = paddle.allclose(x, y, rtol=True)

        self.assertRaises(TypeError, test_rtol)

        def test_atol():
            result = paddle.allclose(x, y, atol=True)

        self.assertRaises(TypeError, test_atol)

        def test_equal_nan():
            result = paddle.allclose(x, y, equal_nan=1)

        self.assertRaises(TypeError, test_equal_nan)
Example No. 15
 def test_out2(self):
     with paddle.static.program_guard(paddle.static.Program(),
                                      paddle.static.Program()):
         x = paddle.data('x', shape=[-1, 2], dtype='float64')
         index = paddle.data('index', shape=[-1, 1], dtype='int32')
         axis = paddle.data('axis', shape=[1], dtype='int32')
         out = paddle.gather(x, index, axis)
         place = paddle.CPUPlace()
         exe = paddle.static.Executor(place)
         x_np = np.array([[1, 2], [3, 4], [5, 6]]).astype('float64')
         index_np = np.array([1, 1]).astype('int32')
         axis_np = np.array([1]).astype('int32')
         result, = exe.run(
             feed={"x": x_np,
                   "index": index_np,
                   'axis': axis_np},
             fetch_list=[out])
         expected_output = gather_numpy(x_np, index_np, axis_np)
     self.assertTrue(np.allclose(result, expected_output))
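gather_numpy is defined elsewhere in the test file; under the assumption that paddle.gather simply selects entries along the given axis, a NumPy sketch of such a reference is:

    import numpy as np

    def gather_numpy_ref(x, index, axis):
        # Accept either a Python int or a one-element array for the axis.
        axis = int(np.asarray(axis).reshape(-1)[0])
        return np.take(x, index, axis=axis)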
Example No. 16
def test_static(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()

    place = fluid.CUDAPlace(0) if paddle.fluid.core.is_compiled_with_cuda() \
        else fluid.CPUPlace()

    with paddle.static.program_guard(prog, startup_prog):
        x = paddle.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
        y = paddle.data(name='y', shape=y_np.shape, dtype=x_np.dtype)
        dist = paddle.nn.layer.distance.PairwiseDistance(
            p=p, epsilon=epsilon, keepdim=keepdim)
        distance = dist(x, y)
        exe = paddle.static.Executor(place)
        static_ret = exe.run(prog,
                             feed={'x': x_np,
                                   'y': y_np},
                             fetch_list=[distance])
        static_ret = static_ret[0]
    return static_ret
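As a point of comparison, a hedged NumPy version of the same computation, assuming PairwiseDistance takes the p-norm of (x - y + epsilon) over the last axis (the definition used by the analogous PyTorch API):

    import numpy as np

    def pairwise_distance_ref(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
        # epsilon is assumed to be added to the element-wise difference.
        diff = x_np - y_np + epsilon
        return np.linalg.norm(diff, ord=p, axis=-1, keepdims=keepdim)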
Example No. 17
    def test_l2_loss(self):
        student_main = fluid.Program()
        student_startup = fluid.Program()
        with fluid.program_guard(student_main, student_startup):
            input = paddle.data(name="image", shape=[None, 3, 224, 224])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            student_predict = conv1 + conv2

        teacher_main = fluid.Program()
        teacher_startup = fluid.Program()
        with fluid.program_guard(teacher_main, teacher_startup):
            input = paddle.data(name="image", shape=[None, 3, 224, 224])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            teacher_predict = conv_bn_layer(conv5, 8, 3, "conv6")

        place = fluid.CPUPlace()
        data_name_map = {'image': 'image'}
        merge(teacher_main, student_main, data_name_map, place)
        merged_ops = []
        for block in student_main.blocks:
            for op in block.ops:
                merged_ops.append(op.type)
        with fluid.program_guard(student_main):
            distill_loss = l2_loss('teacher_conv6_bn_output.tmp_2',
                                   'conv2_bn_output.tmp_2', student_main)
        loss_ops = []
        for block in student_main.blocks:
            for op in block.ops:
                loss_ops.append(op.type)
        self.assertTrue(set(merged_ops).difference(set(loss_ops)) == set())
        self.assertTrue(
            set(loss_ops).difference(set(merged_ops)) ==
            {'reduce_mean', 'square', 'elementwise_sub'})
Example No. 18
    def api_case(self, axis=None, keepdim=False):
        out_ref = ref_logsumexp(self.x, axis, keepdim)
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.data('X', self.shape)
            out = paddle.logsumexp(x, axis, keepdim)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x}, fetch_list=[out])
        self.assertTrue(np.allclose(res[0], out_ref))

        paddle.disable_static(self.place)
        x = paddle.to_variable(self.x)
        out = paddle.logsumexp(x, axis, keepdim)
        self.assertTrue(np.allclose(out.numpy(), out_ref))
        paddle.enable_static()
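ref_logsumexp is not included in the excerpt; a plain NumPy sketch of what it presumably computes (without the max-subtraction trick often used for numerical stability) is:

    import numpy as np

    def ref_logsumexp_np(x, axis=None, keepdim=False):
        # log of the sum of exponentials along `axis`.
        return np.log(np.sum(np.exp(x), axis=axis, keepdims=keepdim))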