def test_errors(self):
    with program_guard(Program(), Program()):
        # the input of elementwise_mul must be Variable.
        x1 = fluid.create_lod_tensor(
            np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
        y1 = fluid.create_lod_tensor(
            np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
        self.assertRaises(TypeError, fluid.layers.elementwise_mul, x1, y1)

        # the input dtype of elementwise_mul must be float16 or float32 or int32
        x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
        y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
        self.assertRaises(TypeError, fluid.layers.elementwise_mul, x2, y2)
def __run_static_graph_case(self, x_data, y_data, axis=-1):
    with program_guard(Program(), Program()):
        x = paddle.static.data(
            name='x', shape=x_data.shape, dtype=x_data.dtype)
        y = paddle.static.data(
            name='y', shape=y_data.shape, dtype=y_data.dtype)
        res = tensor.multiply(x, y, axis=axis)

        place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        exe = fluid.Executor(place)
        outs = exe.run(fluid.default_main_program(),
                       feed={'x': x_data,
                             'y': y_data},
                       fetch_list=[res])
        res = outs[0]
        return res
def test_shifts_as_tensor_static(self):
    with program_guard(Program(), Program()):
        x = paddle.arange(9).reshape([3, 3]).astype('float32')
        shape = paddle.shape(x)
        shifts = shape // 2
        axes = [0, 1]
        out = paddle.roll(x, shifts=shifts, axis=axes)
        expected_out = np.array([[8, 6, 7], [2, 0, 1], [5, 3, 4]])

        exe = fluid.Executor(fluid.CPUPlace())
        [out_np] = exe.run(fetch_list=[out])
        self.assertTrue(np.allclose(out_np, expected_out))

        if paddle.is_compiled_with_cuda():
            # re-run on the GPU place when CUDA is available
            exe = fluid.Executor(fluid.CUDAPlace(0))
            [out_np] = exe.run(fetch_list=[out])
            self.assertTrue(np.allclose(out_np, expected_out))
def test_grad(self):
    place = core.CPUPlace()
    program = Program()
    with program_guard(program):
        x = layers.data(
            name='x', shape=[1], dtype='float32', stop_gradient=False)
        y = layers.data(
            name='y', shape=[1], dtype='bool', stop_gradient=False)

        level = 0

        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
        out = merge_lod_tensor(
            in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
        mean = layers.mean(out)

        append_backward(mean)

    tensor = core.LoDTensor()
    tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
    tensor.set_recursive_sequence_lengths([[3, 6, 1]])

    mask_np = np.array([0, 1, 0]).astype('bool')
    mask_np = np.expand_dims(mask_np, axis=1)

    mask = core.LoDTensor()
    mask.set(mask_np, place)

    exe = Executor(place)
    scope = core.Scope()

    g_vars = program.global_block().var(x.name + "@GRAD")
    g_out = [
        item.sum()
        for item in map(np.array,
                        exe.run(program,
                                feed={'x': tensor,
                                      'y': mask},
                                fetch_list=[g_vars],
                                scope=scope,
                                return_numpy=False))
    ]

    g_out_sum = np.array(g_out).sum()

    self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
def test_broadcast_api_3(self):
    paddle.enable_static()
    with program_guard(Program(), Program()):
        x = paddle.static.data(name='x', shape=[5], dtype=typename)
        y = paddle.static.data(name='y', shape=[3, 1], dtype=typename)
        op = eval("paddle.%s" % (self.op_type))
        out = op(x, y)
        exe = paddle.static.Executor(self.place)
        input_x = np.arange(0, 5).reshape((5)).astype(typename)
        input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(typename)
        real_result = callback(input_x, input_y)
        res, = exe.run(feed={"x": input_x,
                             "y": input_y},
                       fetch_list=[out])
        self.assertEqual((res == real_result).all(), True)
def test_errors(self):
    with program_guard(Program(), Program()):
        x = numpy.random.random((2, 4)).astype("float32")

        def test_Variable():
            rank_table = lod_rank_table(x=x, level=1)

        self.assertRaises(TypeError, test_Variable)

        def test_list_Variable():
            rank_table = lod_rank_table(x=[x], level=1)

        self.assertRaises(TypeError, test_list_Variable)

        x = data(name='x', shape=[10], dtype='float32', lod_level=1)
        out = lod_rank_table(x=x, level=0)
        out = lod_rank_table(x=[x], level=0)
def test_fluid_out(self):
    with program_guard(Program()):
        zeros = fluid.layers.zeros(shape=[10], dtype="int64")
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        result, = exe.run(fetch_list=[zeros])
        expected_result = np.zeros(10, dtype="int64")
    self.assertEqual((result == expected_result).all(), True)
def test_bool_broadcast_api_4(self):
    paddle.enable_static()
    with program_guard(Program(), Program()):
        x = paddle.static.data(name='x', shape=[3, 1], dtype='bool')
        y = paddle.static.data(name='y', shape=[1], dtype='bool')
        op = eval("paddle.%s" % (self.op_type))
        out = op(x, y)
        exe = paddle.static.Executor(self.place)
        # np.bool is removed in recent NumPy releases; np.bool_ is equivalent
        input_x = np.array([True, False, True]).astype(np.bool_)
        input_y = np.array([True]).astype(np.bool_)
        real_result = callback(input_x, input_y)
        res, = exe.run(feed={"x": input_x,
                             "y": input_y},
                       fetch_list=[out])
        self.assertEqual((res == real_result).all(), True)
def test_errors(self):
    with program_guard(Program(), Program()):

        def test_Variable():
            input_data = np.random.random((1, 1536)).astype("float32")
            fluid.layers.dynamic_gru(input=input_data, size=512)

        self.assertRaises(TypeError, test_Variable)

        def test_h_0():
            in_data = fluid.data(
                name="input", shape=[None, 1536], dtype="float32")
            h = fluid.data(name="h", shape=[None, 512], dtype="int32")
            fluid.layers.dynamic_gru(input=in_data, size=512, h_0=h)

        self.assertRaises(TypeError, test_h_0)
def test_errors(self):
    with program_guard(Program(), Program()):
        # the input of Pool2D must be Variable.
        data1 = np.random.random((3, 32, 32, 5)).astype('float32')
        pool2d = fluid.dygraph.Pool2D(
            pool_size=2, pool_type='max', pool_stride=1, global_pooling=False)
        self.assertRaises(TypeError, pool2d, data1)

        # the input dtype of Pool2D must be uint8, int8, float16, float32 or float64
        # uint8 and int8 are only supported with mkldnn
        # float16 is only supported on a GPU place
        data2 = fluid.layers.data(
            name='x1', shape=[3, 32, 32, 5], dtype="int32")
        self.assertRaises(TypeError, pool2d, data2)
def test_errors(self):
    paddle.enable_static()
    with program_guard(Program(), Program()):

        def test_diag_v2_type():
            x = [1, 2, 3]
            output = paddle.diag(x)

        self.assertRaises(TypeError, test_diag_v2_type)

        x = paddle.static.data('data', [3, 3])
        self.assertRaises(TypeError, paddle.diag, x, offset=2.5)

        self.assertRaises(TypeError, paddle.diag, x, padding_value=[9])

        x = paddle.static.data('data2', [3, 3, 3])
        self.assertRaises(ValueError, paddle.diag, x)
def test_api(self, use_cuda=False):
    for x_stop_gradient in [False, True]:
        for y_stop_gradient in [False, True]:
            with fluid.program_guard(Program(), Program()):
                cond = fluid.layers.data(
                    name='cond', shape=self.shape, dtype='bool')
                x = fluid.layers.data(
                    name='x', shape=self.shape, dtype='float32')
                y = fluid.layers.data(
                    name='y', shape=self.shape, dtype='float32')

                x.stop_gradient = x_stop_gradient
                y.stop_gradient = y_stop_gradient

                result = paddle.where(cond, x, y)
                append_backward(layers.mean(result))

                for use_cuda in [False, True]:
                    if use_cuda and not fluid.core.is_compiled_with_cuda():
                        break
                    place = fluid.CUDAPlace(
                        0) if use_cuda else fluid.CPUPlace()
                    exe = fluid.Executor(place)
                    fetch_list = [result, result.grad_name]
                    if x_stop_gradient is False:
                        fetch_list.append(x.grad_name)
                    if y_stop_gradient is False:
                        fetch_list.append(y.grad_name)
                    out = exe.run(
                        fluid.default_main_program(),
                        feed={'cond': self.cond,
                              'x': self.x,
                              'y': self.y},
                        fetch_list=fetch_list)
                    assert np.array_equal(out[0], self.out)
                    if x_stop_gradient is False:
                        assert np.array_equal(out[2],
                                              self.ref_x_backward(out[1]))
                        if y.stop_gradient is False:
                            assert np.array_equal(
                                out[3], self.ref_y_backward(out[1]))
                    elif y.stop_gradient is False:
                        assert np.array_equal(out[2],
                                              self.ref_y_backward(out[1]))
def test_errors(self):
    with program_guard(Program(), Program()):

        def test_dtype():
            fluid.layers.linspace(0, 10, 1, dtype="int8")

        self.assertRaises(TypeError, test_dtype)

        def test_dtype1():
            fluid.layers.linspace(0, 10, 1.33, dtype="int32")

        self.assertRaises(TypeError, test_dtype1)

        def test_start_type():
            fluid.layers.linspace([0], 10, 1, dtype="float32")

        self.assertRaises(TypeError, test_start_type)

        def test_end_type():
            fluid.layers.linspace(0, [10], 1, dtype="float32")

        self.assertRaises(TypeError, test_end_type)

        def test_step_dtype():
            fluid.layers.linspace(0, 10, [0], dtype="float32")

        self.assertRaises(TypeError, test_step_dtype)

        def test_start_dtype():
            start = fluid.data(shape=[1], dtype="float64", name="start")
            fluid.layers.linspace(start, 10, 1, dtype="float32")

        self.assertRaises(ValueError, test_start_dtype)

        def test_end_dtype():
            end = fluid.data(shape=[1], dtype="float64", name="end")
            fluid.layers.linspace(0, end, 1, dtype="float32")

        self.assertRaises(ValueError, test_end_dtype)

        def test_num_dtype():
            num = fluid.data(shape=[1], dtype="int32", name="step")
            fluid.layers.linspace(0, 10, num, dtype="float32")

        self.assertRaises(TypeError, test_step_dtype)
def test_errors(self):
    paddle.enable_static()
    with program_guard(Program(), Program()):
        a = fluid.layers.data(name='a', shape=[2], dtype='float32')
        b = fluid.layers.data(name='b', shape=[2], dtype='float32')
        c = fluid.layers.data(name='c', shape=[2], dtype='int16')
        d = fluid.create_lod_tensor(np.array([[-1]]), [[1]], self.place)

        op = eval("fluid.layers.%s" % self.op_type)
        self.assertRaises(TypeError, op, x=a, y=b, axis=True)
        self.assertRaises(TypeError, op, x=a, y=b, force_cpu=1)
        self.assertRaises(TypeError, op, x=a, y=b, cond=1)
        self.assertRaises(TypeError, op, x=a, y=c)
        self.assertRaises(TypeError, op, x=c, y=a)
        self.assertRaises(TypeError, op, x=a, y=d)
        self.assertRaises(TypeError, op, x=d, y=a)
        self.assertRaises(TypeError, op, x=c, y=d)
def test_errors(self):
    with program_guard(Program(), Program()):
        # The input must be Variable.
        x1 = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float64")
        target_lod = [2, 2]
        self.assertRaises(TypeError, fluid.layers.lod_reset, x1, target_lod)

        # Input(x) dtype must be float32 or float64 or int32 or int64
        for dtype in ["bool", "float16"]:
            x2 = fluid.layers.data(
                name='x2' + dtype, shape=[4], dtype=dtype)
            y2 = fluid.layers.data(
                name='y2' + dtype, shape=[4], dtype='int32', lod_level=2)
            self.assertRaises(TypeError, fluid.layers.lod_reset, x2, y2)
def test_errors(self):
    with program_guard(Program(), Program()):
        x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64')
        y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64')
        cond_i = np.array([False, False, True, True]).astype('bool')

        def test_Variable():
            paddle.where(cond_i, x_i, y_i)

        self.assertRaises(TypeError, test_Variable)

        def test_type():
            x = fluid.layers.data(name='x', shape=[4], dtype='bool')
            y = fluid.layers.data(name='y', shape=[4], dtype='float16')
            cond = fluid.layers.data(name='cond', shape=[4], dtype='int32')
            paddle.where(cond, x, y)

        self.assertRaises(TypeError, test_type)
def test_errors(self): with program_guard(Program(), Program()): x = fluid.layers.data(name="x", shape=[245, 30, 30], dtype="float32") rois = fluid.layers.data(name="rois", shape=[4], dtype="float32", lod_level=1) # spatial_scale must be float type self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 2, 7, 7) # pooled_height must be int type self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 0.7, 7) # pooled_width must be int type self.assertRaises(TypeError, fluid.layers.prroi_pool, x, rois, 0.25, 7, 0.7)
def test_renorm_api(self):
    paddle.enable_static()
    self.input_data()

    # case 1:
    with program_guard(Program(), Program()):
        #x = fluid.layers.data(name = 'x',shape=[-1, 2, 3])
        x = paddle.static.data(name="x", shape=[-1, 2, 3], dtype='float64')
        z = paddle.renorm(x, self.p, self.dim, self.max_norm)
        exe = fluid.Executor(fluid.CPUPlace())
        res, = exe.run(feed={"x": self.data_x},
                       fetch_list=[z],
                       return_numpy=False)
        expected = np.array([[[0.40594056, 0.29285714, -0.41000000],
                              [0.60891086, 0.04392857, 0.61500001]],
                             [[0.40594056, -1.17142856, 0.41000000],
                              [0.62920785, 0.54178572, 0.61500001]]])
        self.assertTrue(np.allclose(expected, np.array(res)))
def test_error(self):
    main_program = Program()
    startup_program = Program()
    with program_guard(main_program, startup_program):
        x = layers.data(
            name='x', shape=[1], dtype='float32', stop_gradient=False)
        y = layers.data(
            name='y', shape=[1], dtype='bool', stop_gradient=False)
        level = 0

        with self.assertRaises(TypeError):
            split_lod_tensor(input=set(), mask=y, level=level)
        with self.assertRaises(TypeError):
            split_lod_tensor(input=x, mask=set(), level=level)
        with self.assertRaises(TypeError):
            split_lod_tensor(input=x, mask=set(), level=None)
def test_errors(self):
    with program_guard(Program(), Program()):
        x_data = np.random.random((2, 2, 2, 2)).astype("float32")
        y_data = np.random.random((2, 2, 2, 2)).astype("float32")

        def test_Variable_x():
            var_y = fluid.data(
                name="data_y", shape=[2, 2, 2, 2], dtype="float32")
            fluid.layers.pad_constant_like(x=x_data, y=var_y)

        self.assertRaises(TypeError, test_Variable_x)

        def test_Variable_y():
            var_x = fluid.data(
                name="data_x", shape=[2, 2, 2, 2], dtype="float32")
            fluid.layers.pad_constant_like(x=var_x, y=y_data)

        self.assertRaises(TypeError, test_Variable_y)
def test_errors(self):
    main_prog = Program()
    start_prog = Program()
    with program_guard(main_prog, start_prog):

        def test_Variable():
            x1 = fluid.create_lod_tensor(
                np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
            fluid.layers.uniform_random(x1)

        self.assertRaises(TypeError, test_Variable)

        def test_dtype():
            x2 = fluid.layers.data(
                name='x2', shape=[4, 784], dtype='float32')
            fluid.layers.uniform_random(x2, 'int32')

        self.assertRaises(TypeError, test_dtype)
def test_addcmul_with_broadcast0(self):
    program = Program()
    with program_guard(program):
        input = fluid.data(name='in', shape=[3, 100], dtype='float32')
        tensor1 = fluid.data(name='t1', shape=[3, 100], dtype='float32')
        tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')

        out = paddle.addcmul(input, tensor1, tensor2)
        self.assertEqual(out.shape, input.shape)
def test_static(self):
    with program_guard(Program(), Program()):
        # The input type of sign_op must be Variable or numpy.ndarray.
        input1 = 12
        self.assertRaises(TypeError, paddle.tensor.math.sign, input1)
        # The input dtype of sign_op must be float16, float32, float64.
        input2 = fluid.layers.data(
            name='input2', shape=[12, 10], dtype="int32")
        input3 = fluid.layers.data(
            name='input3', shape=[12, 10], dtype="int64")
        self.assertRaises(TypeError, paddle.tensor.math.sign, input2)
        self.assertRaises(TypeError, paddle.tensor.math.sign, input3)
        input4 = fluid.layers.data(
            name='input4', shape=[4], dtype="float16")
        paddle.sign(input4)
def test_errors(self):
    main_prog = Program()
    start_prog = Program()
    with program_guard(main_prog, start_prog):

        def test_Variable():
            x1 = fluid.create_lod_tensor(
                np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace())
            rand(x1)

        self.assertRaises(TypeError, test_Variable)

        def test_dtype():
            dim_1 = fluid.layers.fill_constant([1], "int64", 3)
            dim_2 = fluid.layers.fill_constant([1], "int32", 5)
            rand(shape=[dim_1, dim_2], dtype='int32')

        self.assertRaises(TypeError, test_dtype)
def test_error(self):
    with program_guard(Program(), Program()):

        # The argument shape's size of randn_op should not be 0.
        def test_shape_size():
            out = paddle.randn(shape=[])

        self.assertRaises(AssertionError, test_shape_size)

        # The argument shape's type of randn_op should be list or tuple.
        def test_shape_type():
            out = paddle.randn(shape=1)

        self.assertRaises(TypeError, test_shape_type)

        # The argument dtype of randn_op should be float32 or float64.
        def test_dtype_float16():
            out = paddle.randn(shape=[1, 2], dtype='float16')

        self.assertRaises(TypeError, test_dtype_float16)

        # The argument dtype of randn_op should be float32 or float64.
        def test_dtype_int32():
            out = paddle.randn(shape=[1, 2], dtype='int32')

        self.assertRaises(TypeError, test_dtype_int32)

        # The argument dtype of randn_op should be float32 or float64.
        def test_dtype_int64():
            out = paddle.randn(shape=[1, 2], dtype='int64')

        self.assertRaises(TypeError, test_dtype_int64)

        # The argument dtype of randn_op should be float32 or float64.
        def test_dtype_uint8():
            out = paddle.randn(shape=[1, 2], dtype='uint8')

        self.assertRaises(TypeError, test_dtype_uint8)

        # The argument dtype of randn_op should be float32 or float64.
        def test_dtype_bool():
            out = paddle.randn(shape=[1, 2], dtype='bool')

        self.assertRaises(TypeError, test_dtype_bool)
def test_errors(self): """test_errors.""" # test static computation graph: dtype can not be int8 paddle.enable_static() with program_guard(Program(), Program()): x = paddle.static.data(name='x', shape=[100], dtype=np.int8) y = paddle.static.data(name='y', shape=[100], dtype=np.int8) self.assertRaises(TypeError, tensor.multiply, x, y) # test static computation graph: inputs must be broadcastable with program_guard(Program(), Program()): x = paddle.static.data(name='x', shape=[20, 50], dtype=np.float64) y = paddle.static.data(name='y', shape=[20], dtype=np.float64) self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y) np.random.seed(7) # test dynamic computation graph: dtype can not be int8 paddle.disable_static() x_data = np.random.randn(200).astype(np.int8) y_data = np.random.randn(200).astype(np.int8) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y) # test dynamic computation graph: inputs must be broadcastable x_data = np.random.rand(200, 5) y_data = np.random.rand(200) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y) # test dynamic computation graph: inputs must be broadcastable(python) x_data = np.random.rand(200, 5) y_data = np.random.rand(200) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y) # test dynamic computation graph: dtype must be same x_data = np.random.randn(200).astype(np.int64) y_data = np.random.randn(200).astype(np.float64) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) self.assertRaises(TypeError, paddle.multiply, x, y)
def test_addcmul(self):
    program = Program()
    with program_guard(program):
        data_shape = [3, 64, 64]
        input = fluid.data(name='in', shape=data_shape, dtype='float32')
        tensor1 = fluid.data(name='t1', shape=data_shape, dtype='float32')
        tensor2 = fluid.data(name='t2', shape=data_shape, dtype='float32')

        out = paddle.addcmul(input, tensor1, tensor2)
        self.assertEqual(out.shape, input.shape)
def test_addcmul_has_out(self):
    program = Program()
    with program_guard(program):
        input = fluid.data(name='in', shape=[4, 100], dtype='float32')
        tensor1 = fluid.data(name='t1', shape=[100], dtype='float32')
        tensor2 = fluid.data(name='t2', shape=[100], dtype='float32')
        out = fluid.data(name='out', shape=[4, 100], dtype='float32')

        out = paddle.addcmul(input, tensor1, tensor2, out=out)
        self.assertEqual(out.shape, input.shape)
def test_errors(self):
    with program_guard(Program(), Program()):
        # The input type of solve_op must be Variable.
        x1 = fluid.create_lod_tensor(
            np.array([[-1]]), [[1]], fluid.CPUPlace())
        y1 = fluid.create_lod_tensor(
            np.array([[-1]]), [[1]], fluid.CPUPlace())
        self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1)

        # The data type of input must be float32 or float64.
        x2 = fluid.data(name="x2", shape=[30, 30], dtype="bool")
        y2 = fluid.data(name="y2", shape=[30, 10], dtype="bool")
        self.assertRaises(TypeError, paddle.linalg.triangular_solve, x2, y2)

        x3 = fluid.data(name="x3", shape=[30, 30], dtype="int32")
        y3 = fluid.data(name="y3", shape=[30, 10], dtype="int32")
        self.assertRaises(TypeError, paddle.linalg.triangular_solve, x3, y3)

        x4 = fluid.data(name="x4", shape=[30, 30], dtype="float16")
        y4 = fluid.data(name="y4", shape=[30, 10], dtype="float16")
        self.assertRaises(TypeError, paddle.linalg.triangular_solve, x4, y4)

        # The number of dimensions of input X must be >= 2.
        x5 = fluid.data(name="x5", shape=[30], dtype="float64")
        y5 = fluid.data(name="y5", shape=[30, 30], dtype="float64")
        self.assertRaises(ValueError, paddle.linalg.triangular_solve, x5, y5)

        # The number of dimensions of input Y must be >= 2.
        x6 = fluid.data(name="x6", shape=[30, 30], dtype="float64")
        y6 = fluid.data(name="y6", shape=[30], dtype="float64")
        self.assertRaises(ValueError, paddle.linalg.triangular_solve, x6, y6)

        # The innermost 2 dimensions of input X should be equal to each other.
        x7 = fluid.data(name="x7", shape=[2, 3, 4], dtype="float64")
        y7 = fluid.data(name="y7", shape=[2, 4, 3], dtype="float64")
        self.assertRaises(ValueError, paddle.linalg.triangular_solve, x7, y7)
def test_errors(self):
    with program_guard(Program(), Program()):

        def test_Variable():
            # the input of dropout must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
            fluid.layers.dropout(x1, dropout_prob=0.5)

        self.assertRaises(TypeError, test_Variable)

        def test_dtype():
            # the input dtype of dropout must be float16, float32 or float64
            # float16 is only supported on a GPU place
            x2 = fluid.layers.data(
                name='x2', shape=[3, 4, 5, 6], dtype="int32")
            fluid.layers.dropout(x2, dropout_prob=0.5)

        self.assertRaises(TypeError, test_dtype)