Example #1
    def test_scalar_div_tensor(self):
        # scalar(int) / tensor(int64)
        with program_guard(Program()):
            a = 1
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(int) / tensor(float32)
        with program_guard(Program()):
            a = 1
            b = paddle.full([2, 2, 2], 0.5, dtype='float32')
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(float) / tensor(int64)
        with program_guard(Program()):
            a = 1.0
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # scalar(float) / tensor(float32)
        with program_guard(Program()):
            a = 1.0
            b = paddle.full([2, 2, 2], 0.5, dtype='float32')
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')
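
These cases call a `check_operation` helper that the page does not show. A minimal sketch of what it plausibly does (an assumption, not the actual Paddle test code): build `a <op> b` inside the current program, run it with an executor, and compare dtype and values against the expected tensor `c`. The other scalar/tensor operator examples below (#2, #3, #5, #6, #12, #24) rely on the same helper.

    def check_operation(self, a, b, c, op):
        # Hypothetical helper: evaluate `a <op> b` inside the current
        # static program and compare against the expected tensor `c`.
        exe = paddle.static.Executor(paddle.CPUPlace())
        if op == '+':
            rlt = a + b
        elif op == '-':
            rlt = a - b
        elif op == '*':
            rlt = a * b
        elif op == '/':
            rlt = a / b
        elif op == '//':
            rlt = a // b
        elif op == '%':
            rlt = a % b
        elif op == '**':
            rlt = a**b
        else:
            raise ValueError("unsupported operator: " + op)
        rlt_np, c_np = exe.run(fetch_list=[rlt, c])
        self.assertEqual(rlt_np.dtype, c_np.dtype)
        np.testing.assert_array_equal(rlt_np, c_np)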
Example #2
    def test_tensor_div_scalar(self):
        # tensor(int64) / scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(float32) / scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 2
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(int64) / scalar(float, .0)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 2.0
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(int64) / scalar(float, .5)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 0.5
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')

        # tensor(float32) / scalar(float)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 0.5
            c = paddle.full([2, 2, 2], 2, dtype="float32")
            self.check_operation(a, b, c, '/')
Example #3
    def test_scalar_sub_tensor(self):
        # scalar(int) - tensor(int64)
        with program_guard(Program()):
            a = 1
            b = paddle.ones([2, 2, 2], dtype='int64')
            c = paddle.zeros([2, 2, 2], dtype="int64")
            self.check_operation(a, b, c, '-')

        # scalar(int) - tensor(float32)
        with program_guard(Program()):
            a = 1
            b = paddle.ones([2, 2, 2], dtype='float32')
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float, .0) - tensor(int64)
        with program_guard(Program()):
            a = 1.0
            b = paddle.ones([2, 2, 2], dtype='int64')
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float, .5) - tensor(int64)
        with program_guard(Program()):
            a = 1.5
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], -0.5, dtype="float32")
            self.check_operation(a, b, c, '-')

        # scalar(float) - tensor(float32)
        with program_guard(Program()):
            a = 1.5
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], -0.5, dtype="float32")
            self.check_operation(a, b, c, '-')
Example #4
    def _test_api(self):
        paddle.enable_static()
        input = np.random.random([2, 25]).astype("float32")
        shape = [2, 5, 5]
        main_prog = Program()
        with program_guard(main_prog, Program()):
            positive_five = self.fill_constant([1], "int32", 5)
            x = self.data(name="x", shape=[2, 25], dtype="float32")

            actual_shape = self.data(name="shape", shape=[3], dtype="int32")

            # Situation 1: have shape (list, no tensor), no actual_shape (Tensor)
            out_1 = self.reshape(x, shape)

            # Situation 2: have shape (list, no tensor), have actual_shape (Tensor)
            out_2 = fluid.layers.reshape(
                x, shape=shape, actual_shape=actual_shape)
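            # actual_shape is a legacy fluid-era argument (assumed not exposed
            # by paddle.reshape in 2.x), hence the direct fluid.layers.reshape
            # call for this case.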

            # Situation 3: have shape (list, with tensor), no actual_shape (Tensor)
            out_3 = self.reshape(x, shape=[positive_five, 10])

            # Situation 4: have shape (Tensor), no actual_shape (Tensor)
            out_4 = self.reshape(x, shape=actual_shape)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        res_1, res_2, res_3, res_4 = exe.run(
            main_prog,
            feed={"x": input,
                  "shape": np.array([2, 5, 5]).astype("int32")},
            fetch_list=[out_1, out_2, out_3, out_4])

        assert np.array_equal(res_1, input.reshape(shape))
        assert np.array_equal(res_2, input.reshape(shape))
        assert np.array_equal(res_3, input.reshape([5, 10]))
        assert np.array_equal(res_4, input.reshape(shape))
Example #5
    def test_scalar_pow_tensor(self):
        # scalar(int) ** tensor(int64)
        with program_guard(Program()):
            a = 3
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 9, dtype="int64")
            self.check_operation(a, b, c, '**')

        # scalar(float) ** tensor(int64)
        with program_guard(Program()):
            a = 3.0
            b = paddle.full([2, 2, 2], 2, dtype='int64')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')

        # scalar(int) ** tensor(float32)
        with program_guard(Program()):
            a = 3
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')

        # scalar(float) ** tensor(float32)
        with program_guard(Program()):
            a = 3.0
            b = paddle.full([2, 2, 2], 2, dtype='float32')
            c = paddle.full([2, 2, 2], 9, dtype="float32")
            self.check_operation(a, b, c, '**')
Example #6
    def test_tensor_sub_scalar(self):
        # tensor(int64) - scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 1
            c = paddle.zeros([2, 2, 2], dtype="int64")
            self.check_operation(a, b, c, '-')

        # tensor(float32) - scalar(int)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='float32')
            b = 1
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(int64) - scalar(float, .0)
        with program_guard(Program()):
            a = paddle.ones([2, 2, 2], dtype='int64')
            b = 1.0
            c = paddle.zeros([2, 2, 2], dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(int64) - scalar(float, .5)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 2, dtype='int64')
            b = 1.5
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '-')

        # tensor(float32) - scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 2, dtype='float32')
            b = 1.5
            c = paddle.full([2, 2, 2], 0.5, dtype="float32")
            self.check_operation(a, b, c, '-')
Example #7
    def test_static_graph(self):
        paddle.enable_static()

        dtype = 'float32'

        train_program = Program()
        startup_program = Program()

        with program_guard(train_program, startup_program):
            x = np.random.random(self.x_shape).astype(dtype)
            data_x = paddle.static.data('x',
                                        shape=self.data_x_shape,
                                        dtype=dtype)

            out = paddle.empty_like(data_x)

        place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        res = exe.run(train_program, feed={'x': x}, fetch_list=[out])

        self.dst_dtype = dtype
        self.dst_shape = x.shape
        self.__check_out__(res[0])

        paddle.disable_static()
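
`__check_out__` is defined elsewhere on the test class. Since paddle.empty_like returns uninitialized memory, only metadata can be verified; a plausible sketch (an assumption):

    def __check_out__(self, out):
        # Values of an empty tensor are uninitialized, so compare only
        # dtype and shape against the recorded expectations.
        self.assertEqual(str(out.dtype), self.dst_dtype)
        self.assertEqual(out.shape, self.dst_shape)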
Example #8
    def test_attr_tensor_API(self):
        startup_program = Program()
        train_program = Program()
        with program_guard(train_program, startup_program):
            fill_value = 2.0
            input = paddle.fluid.data(name='input',
                                      dtype='float32',
                                      shape=[2, 3])
            output = paddle.full_like(input, fill_value)
            output_dtype = paddle.full_like(input, fill_value, dtype='float32')
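            # output_dtype exercises the explicit-dtype path; only `output`
            # is fetched and checked below.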

            place = paddle.CPUPlace()
            if core.is_compiled_with_cuda():
                place = paddle.CUDAPlace(0)
            exe = paddle.static.Executor(place)
            exe.run(startup_program)

            img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)

            res = exe.run(train_program,
                          feed={'input': img},
                          fetch_list=[output])

            out_np = np.array(res[0])
            self.assertTrue(not (out_np - np.full_like(img, fill_value)).any(),
                            msg="full_like output is wrong, out = " +
                            str(out_np))
Example #9
    def test_static_graph(self):
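        # Check paddle.mv against np.dot for every combination of
        # stop_gradient on the two operands.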
        for x_stop_gradient in [False, True]:
            for vec_stop_gradient in [False, True]:

                paddle.enable_static()

                train_program = Program()
                startup_program = Program()

                self.input_x = np.random.rand(5, 100).astype("float64")
                self.input_vec = np.random.rand(100).astype("float64")

                with program_guard(train_program, startup_program):
                    data_x = paddle.static.data("x",
                                                shape=[5, 100],
                                                dtype="float64")
                    data_vec = paddle.static.data("vec",
                                                  shape=[100],
                                                  dtype="float64")

                    data_x.stop_gradient = x_stop_gradient
                    data_vec.stop_gradient = vec_stop_gradient

                    result_vec = paddle.mv(data_x, data_vec)

                    self.place = paddle.CPUPlace()
                    exe = paddle.static.Executor(self.place)
                    res, = exe.run(feed={
                        "x": self.input_x,
                        "vec": self.input_vec
                    },
                                   fetch_list=[result_vec])
                    z_expected = np.array(np.dot(self.input_x, self.input_vec))
                    self.assertTrue(np.allclose(res, z_expected))
Example #10
    def test_api(self):
        shape = [1000, 784]
        train_program = Program()
        startup_program = Program()
        with program_guard(train_program, startup_program):
            x1 = paddle.randn(shape, 'float32')
            x2 = paddle.randn(shape, 'float64')

            dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
            dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
            x3 = paddle.randn([dim_1, dim_2, 784])

            var_shape = paddle.static.data('X', [2], 'int32')
            x4 = paddle.randn(var_shape)

        place = paddle.CUDAPlace(
            0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        res = exe.run(train_program,
                      feed={'X': np.array(shape, dtype='int32')},
                      fetch_list=[x1, x2, x3, x4])

        for out in res:
            self.assertAlmostEqual(np.mean(out), 0.0, delta=0.1)
            self.assertAlmostEqual(np.std(out), 1.0, delta=0.1)
Example #11
    def run_static_api(self, place):
        paddle.enable_static()
        expected = calc_margin_rank_loss(self.x_data,
                                         self.y_data,
                                         self.label_data,
                                         margin=margin,
                                         reduction=reduction)
        with program_guard(Program(), Program()):
            x = paddle.static.data(name="x",
                                   shape=[10, 10],
                                   dtype="float64")
            y = paddle.static.data(name="y",
                                   shape=[10, 10],
                                   dtype="float64")
            label = paddle.static.data(name="label",
                                       shape=[10, 10],
                                       dtype="float64")
            margin_rank_loss = paddle.nn.loss.MarginRankingLoss(
                margin=margin, reduction=reduction)
            result = margin_rank_loss(x, y, label)
            exe = paddle.static.Executor(place)
            result_numpy, = exe.run(feed={
                "x": self.x_data,
                "y": self.y_data,
                "label": self.label_data
            },
                                    fetch_list=[result])
            self.assertTrue(np.allclose(result_numpy, expected))
            self.assertTrue('loss' in result.name)
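
`calc_margin_rank_loss` is the NumPy reference used above (`margin` and `reduction` come from the enclosing parameterized test). A minimal sketch under the standard definition loss = max(0, -label * (x - y) + margin):

def calc_margin_rank_loss(x, y, label, margin=0.0, reduction='none'):
    # NumPy reference for margin ranking loss.
    result = np.maximum(0, -label * (x - y) + margin)
    if reduction == 'mean':
        return np.mean(result)
    if reduction == 'sum':
        return np.sum(result)
    return result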
Example #12
    def test_tensor_mod_scalar(self):
        # tensor(int64) % scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="int64")
            self.check_operation(a, b, c, '%')

        # tensor(int64) % scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2.0
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')

        # tensor(float32) % scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='float32')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')

        # tensor(float32) % scalar(float)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='float32')
            b = 2.0
            c = paddle.full([2, 2, 2], 1, dtype="float32")
            self.check_operation(a, b, c, '%')
Example #13
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of Print_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], paddle.CPUPlace())
            self.assertRaises(TypeError, paddle.static.Print, x1)
            # The input dtype of Print_op must be float32, float64, int32,
            # int64 or bool.
            x2 = paddle.static.data(name='x2', shape=[4], dtype="float16")
            self.assertRaises(TypeError, paddle.static.Print, x2)
Example #14
    def _test_errors(self):
        with program_guard(Program(), Program()):
            # The x type of reshape_op must be Variable.
            def test_x_type():
                x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                             paddle.CPUPlace())
                self.reshape(x1, shape=[1])

            self.assertRaises(TypeError, test_x_type)

            # The x dtype of reshape_op must be float16, float32, float64, int32 or int64.
            def test_x_dtype():
                x2 = self.data(name="x2", shape=[2, 25], dtype="int8")
                self.reshape(x2, shape=[2, 5, 5])

            self.assertRaises(TypeError, test_x_dtype)

            def test_x_dtype_float16():
                x_float16 = self.data(name="x_float16",
                                      shape=[2, 25],
                                      dtype="float16")
                self.reshape(x_float16, shape=[2, 5, 5])

            test_x_dtype_float16()

            x3 = self.data(name="x3", shape=[2, 25], dtype="float32")

            # The argument shape's type of reshape_op must be list, tuple or Variable.
            def test_shape_type():
                self.reshape(x3, shape=1)

            self.assertRaises(TypeError, test_shape_type)

            # The argument actual_shape's type of reshape_op must be Variable or None.
            def test_actual_shape_type():
                self.reshape(x3, shape=[25, 2], actual_shape=1)

            self.assertRaises(TypeError, test_actual_shape_type)

            # The argument shape must not contain more than one -1.
            def test_shape_1():
                self.reshape(x3, shape=[-1, -1, 5])

            self.assertRaises(AssertionError, test_shape_1)

            # The argument shape must not contain a 0 whose index exceeds the
            # input's number of dimensions.
            def test_shape_2():
                self.reshape(x3, [2, 5, 5, 0])

            self.assertRaises(AssertionError, test_shape_2)

            # The argument shape must not contain more than one negative value.
            def test_shape_3():
                self.reshape(x3, [-1, -2, 5])

            self.assertRaises(AssertionError, test_shape_3)
Example #15
    def test_error(self):
        with program_guard(Program(), Program()):
            # The argument shape's size of randn_op should not be 0.
            self.assertRaises(AssertionError, paddle.randn, [])

            # The argument shape's type of randn_op should be list or tuple.
            self.assertRaises(TypeError, paddle.randn, 1)

            # The argument dtype of randn_op should be float32 or float64.
            self.assertRaises(TypeError, paddle.randn, [1, 2], 'int32')
Example #16
    def test_out(self):
        with program_guard(Program(), Program()):
            x1 = paddle.arange(0, 5, 1, 'float32')

            place = paddle.CUDAPlace(
                0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            out = exe.run(fetch_list=[x1])

        expected_data = np.arange(0, 5, 1).astype(np.float32)
        self.assertEqual((out == expected_data).all(), True)
Example #17
    def test_api(self):
        paddle.enable_static()
        input = np.random.random([2, 25]).astype("float32")
        main_prog = Program()
        with program_guard(main_prog, Program()):
            x = paddle.static.data(name="x", shape=[2, 25], dtype="float32")
            out = self._executed_api(x, scale=2.0, bias=3.0)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        out = exe.run(main_prog, feed={"x": input}, fetch_list=[out])
        self.assertEqual(np.array_equal(out[0], input * 2.0 + 3.0), True)
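
`_executed_api` is injected by the concrete test class; given the expected result `input * 2.0 + 3.0`, a plausible binding (an assumption) is paddle.scale:

    def _executed_api(self, x, scale=1.0, bias=0.0):
        # Hypothetical binding: computes x * scale + bias.
        return paddle.scale(x, scale=scale, bias=bias)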
Example #18
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program(), Program()):

            def test_diagflat_type():
                x = [1, 2, 3]
                output = paddle.diagflat(x)

            self.assertRaises(TypeError, test_diagflat_type)

            x = paddle.static.data('data', [3, 3])
            self.assertRaises(TypeError, paddle.diagflat, x, offset=2.5)
Example #19
    def test_errors(self):
        # test static computation graph: dtype cannot be int8
        paddle.enable_static()
        with program_guard(Program(), Program()):
            x = paddle.static.data(name='x', shape=[100], dtype=np.int8)
            y = paddle.static.data(name='y', shape=[100], dtype=np.int8)
            self.assertRaises(TypeError, paddle.inner, x, y)

        # test static computation graph: inputs must be broadcastable
        with program_guard(Program(), Program()):
            x = paddle.static.data(name='x', shape=[20, 50], dtype=np.float64)
            y = paddle.static.data(name='y', shape=[20], dtype=np.float64)
            self.assertRaises(ValueError, paddle.inner, x, y)

        np.random.seed(7)
        # test dynamic computation graph: dtype cannot be int8
        paddle.disable_static()
        x_data = np.random.randn(200).astype(np.int8)
        y_data = np.random.randn(200).astype(np.int8)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        self.assertRaises(RuntimeError, paddle.inner, x, y)

        # test dynamic computation graph: inputs must be broadcastable
        x_data = np.random.rand(20, 5)
        y_data = np.random.rand(10, 2)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        self.assertRaises(ValueError, paddle.inner, x, y)

        # test dynamic computation graph: dtypes must match
        x_data = np.random.randn(200).astype(np.float32)
        y_data = np.random.randn(200).astype(np.float64)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        self.assertRaises(ValueError, paddle.inner, x, y)

        # test dynamic computation graph: inputs must be Tensor, not ndarray
        x_data = np.random.randn(200).astype(np.float64)
        y_data = np.random.randn(200).astype(np.float64)
        y = paddle.to_tensor(y_data)
        self.assertRaises(ValueError, paddle.inner, x_data, y)

        # test dynamic computation graph: inputs must be Tensor, not ndarray
        x_data = np.random.randn(200).astype(np.float64)
        y_data = np.random.randn(200).astype(np.float64)
        x = paddle.to_tensor(x_data)
        self.assertRaises(ValueError, paddle.inner, x, y_data)

        # test dynamic computation graph: inputs must be Tensor, not ndarray
        x_data = np.random.randn(200).astype(np.float32)
        y_data = np.random.randn(200).astype(np.float32)
        self.assertRaises(ValueError, paddle.inner, x_data, y_data)
Example #20
    def test_out(self):
        n = 10
        place = paddle.NPUPlace(0)
        with program_guard(Program(), Program()):
            x1 = paddle.randperm(n)
            x2 = paddle.randperm(n, 'float32')

            exe = paddle.static.Executor(place)
            res = exe.run(fetch_list=[x1, x2])

            self.assertEqual(res[0].dtype, np.int64)
            self.assertEqual(res[1].dtype, np.float32)
            self.assertTrue(check_randperm_out(n, res[0]))
            self.assertTrue(check_randperm_out(n, res[1]))
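
`check_randperm_out` is a module-level checker not shown on this page; a sketch assuming it verifies that the output is a permutation of 0 .. n-1:

def check_randperm_out(n, data_np):
    # A valid result contains every value in [0, n) exactly once.
    assert isinstance(data_np, np.ndarray)
    return sorted(data_np.tolist()) == list(range(n))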
Example #21
    def static_single_test_median(self, lis_test):
        paddle.enable_static()
        x, axis, keepdims = lis_test
        res_np = np.median(x, axis=axis, keepdims=keepdims)
        if not isinstance(res_np, np.ndarray):
            res_np = np.array([res_np])
        main_program = Program()
        startup_program = Program()
        exe = paddle.static.Executor()
        with program_guard(main_program, startup_program):
            x_in = paddle.fluid.data(shape=x.shape, dtype=x.dtype, name='x')
            y = paddle.median(x_in, axis, keepdims)
            [res_pd] = exe.run(feed={'x': x}, fetch_list=[y])
            self.check_numpy_res(res_pd, res_np)
        paddle.disable_static()
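
`check_numpy_res` presumably compares the Paddle median against the NumPy one; a plausible sketch (an assumption):

    def check_numpy_res(self, pd_res, np_res):
        self.assertEqual(pd_res.shape, np_res.shape)
        self.assertTrue(np.allclose(pd_res, np_res))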
Example #22
    def __init__(self, program=None, in_nodes=None, out_nodes=None):
        """
        Wrap a Paddle program, indexing its persistable variables and its
        input/output nodes.
        """
        super(GraphWrapper, self).__init__()
        self.program = Program() if program is None else program
        self.persistables = {}
        self.teacher_persistables = {}
        for var in self.program.list_vars():
            if var.persistable:
                self.persistables[var.name] = var
        self.compiled_graph = None
        in_nodes = [] if in_nodes is None else in_nodes
        out_nodes = [] if out_nodes is None else out_nodes
        self.in_nodes = OrderedDict(in_nodes)
        self.out_nodes = OrderedDict(out_nodes)
        self._attrs = OrderedDict()
Example #23
    def test_errors(self):
        with program_guard(Program(), Program()):
            self.assertRaises(TypeError,
                              paddle.randint,
                              5,
                              shape=np.array([2]))
            self.assertRaises(TypeError, paddle.randint, 5, dtype='float32')
            self.assertRaises(ValueError, paddle.randint, 5, 5)
            self.assertRaises(ValueError, paddle.randint, -5)
            self.assertRaises(TypeError, paddle.randint, 5, shape=['2'])
            shape_tensor = paddle.static.data('X', [1])
            self.assertRaises(TypeError, paddle.randint, 5, shape=shape_tensor)
            self.assertRaises(TypeError,
                              paddle.randint,
                              5,
                              shape=[shape_tensor])
Example #24
    def test_tensor_floordiv_scalar(self):
        # tensor(int64) // scalar(int)
        with program_guard(Program()):
            a = paddle.full([2, 2, 2], 3, dtype='int64')
            b = 2
            c = paddle.full([2, 2, 2], 1, dtype="int64")
            self.check_operation(a, b, c, '//')
Example #25
    def test_api(self):
        main_prog = Program()
        with program_guard(main_prog, Program()):
            x = paddle.static.data(name="x",
                                   shape=self.shape,
                                   dtype=self.dtype)
            out = self.executed_paddle_api(x)

        exe = paddle.static.Executor(place=paddle.CPUPlace())
        fetch_x, fetch_out = exe.run(main_prog,
                                     feed={"x": self.np_x},
                                     fetch_list=[x, out])

        self.assertTrue(np.array_equal(fetch_x, self.np_x))
        self.assertTrue(
            self.np_compare(fetch_out, self.executed_numpy_api(self.np_x)))
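
`executed_paddle_api` and `executed_numpy_api` are supplied by subclasses so the same fixture can exercise different elementwise ops; a hypothetical pairing:

    def executed_paddle_api(self, x):
        # Hypothetical subclass hook: any unary elementwise op fits here.
        return paddle.tanh(x)

    def executed_numpy_api(self, x):
        return np.tanh(x)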
Example #26
    def test_cpu(self):
        paddle.disable_static(place=paddle.CPUPlace())
        self.run_imperative()
        paddle.enable_static()

        with paddle.static.program_guard(Program()):
            self.run_static()
Example #27
    def test_backward(self):
        switch_main_program(Program())
        loss = self.build_network(False, print_phase='backward')
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[loss],
                       return_numpy=False)
Example #28
    def test_no_summarize(self):
        switch_main_program(Program())
        printed = self.build_network(True, summarize=-1, print_phase='forward')
        exe = paddle.static.Executor(self.place)
        outs = exe.run(feed={'x': self.x_tensor},
                       fetch_list=[printed],
                       return_numpy=False)
Example #29
    def test_errors(self):
        with program_guard(Program(), Program()):
            # for CI coverage

            input_data = paddle.fluid.data(name='input',
                                           dtype='float32',
                                           shape=[2, 3])
            output = paddle.full_like(input_data, 2.0)

            self.assertRaises(TypeError,
                              paddle.full_like,
                              x=input_data,
                              fill_value=2,
                              dtype='uint4')
Example #30
    def _run_static_graph_case(self, x_data, y_data):
        with program_guard(Program(), Program()):
            paddle.enable_static()
            x = paddle.static.data(
                name='x', shape=x_data.shape, dtype=x_data.dtype)
            y = paddle.static.data(
                name='y', shape=y_data.shape, dtype=y_data.dtype)
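            # `tensor` below is assumed to be the paddle.tensor module,
            # e.g. imported as `from paddle import tensor`.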
            res = tensor.multiply(x, y)

            place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda(
            ) else paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            outs = exe.run(paddle.static.default_main_program(),
                           feed={'x': x_data,
                                 'y': y_data},
                           fetch_list=[res])
            res = outs[0]
            return res