def _test_static(self, place, kwargs):
    paddle.enable_static()

    best = float("-10000") if kwargs['mode'] == "max" else float("10000")
    current_lr = 1.0
    cooldown_counter = 0
    num_bad_epochs = 0
    var_list = [best, current_lr, cooldown_counter, num_bad_epochs]

    main_prog = paddle.static.Program()
    start_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, start_prog):
        x = fluid.layers.create_global_var([1], 1, 'float32', persistable=True)
        paddle.increment(x)
        loss = paddle.sin(x)
        scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
        adam = paddle.optimizer.Adam(learning_rate=scheduler)
        adam.minimize(loss)
        lr_var = adam._global_learning_rate()
        test_prog = main_prog.clone()

    exe = paddle.static.Executor(place)
    exe.run(start_prog)

    for epoch in range(20):
        for batch_id in range(1):
            out, actual_lr = exe.run(main_prog,
                                     fetch_list=[loss.name, lr_var.name])
            expected_lr = reduce_lr_on_plateau(
                kwargs['factor'], kwargs['threshold'], kwargs['cooldown'],
                kwargs['patience'], kwargs['mode'], kwargs['threshold_mode'],
                out[0], var_list)

        scheduler.step(out[0])
        actual_lr = scheduler()
        self.assertEqual(actual_lr, np.array(expected_lr))

    for epoch in range(10):
        for batch_id in range(1):
            out, actual_lr = exe.run(test_prog,
                                     fetch_list=[loss.name, lr_var.name])
            expected_lr = reduce_lr_on_plateau(
                kwargs['factor'], kwargs['threshold'], kwargs['cooldown'],
                kwargs['patience'], kwargs['mode'], kwargs['threshold_mode'],
                out[0], var_list)

        scheduler.step(out[0])
        actual_lr = scheduler()
        self.assertEqual(actual_lr, np.array(expected_lr))
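# The test above calls a `reduce_lr_on_plateau` reference helper that is not shown
# here. A minimal sketch of what such a helper could look like, assuming the
# var_list layout [best, current_lr, cooldown_counter, num_bad_epochs] used above
# and standard ReduceOnPlateau semantics (illustrative reconstruction, not
# necessarily the exact helper used by the test suite):
def reduce_lr_on_plateau(decay_rate, threshold, cooldown, patience, m, n, loss,
                         var_list):
    def is_better(current, best, m, n):
        if m == 'min' and n == 'rel':
            return current < best - best * threshold
        elif m == 'min' and n == 'abs':
            return current < best - threshold
        elif m == 'max' and n == 'rel':
            return current > best + best * threshold
        else:  # mode 'max', threshold_mode 'abs'
            return current > best + threshold

    # var_list = [best, current_lr, cooldown_counter, num_bad_epochs]
    if var_list[2] > 0:
        # Still in the cooldown window: just count it down.
        var_list[2] -= 1
        var_list[3] = 0

    if is_better(loss, var_list[0], m, n):
        var_list[0] = loss
        var_list[3] = 0
    else:
        var_list[3] += 1
        if var_list[3] > patience:
            # Patience exhausted: decay the learning rate and enter cooldown.
            var_list[2] = cooldown
            var_list[3] = 0
            new_lr = var_list[1] * decay_rate
            var_list[1] = new_lr if var_list[1] - new_lr > 1e-8 else var_list[1]

    return var_list[1]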
def test_without_kernel_op(self):
    main_program = paddle.static.Program()
    startup_program = paddle.static.Program()
    with paddle.static.program_guard(main_program, startup_program):
        i = paddle.full(shape=[1], dtype='int64', fill_value=0)
        loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
        cond = paddle.less_than(x=i, y=loop_len)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with paddle.static.device_guard("cpu"):
                while_op = fluid.layers.While(cond=cond)
                with while_op.block():
                    i = paddle.increment(x=i, value=1)
                    fluid.layers.less_than(x=i, y=loop_len, cond=cond)

        warning = "The Op(while) is not support to set device."
        warning_num = get_vaild_warning_num(warning, w)
        assert warning_num == 1

    all_ops = main_program.global_block().ops
    device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
    for op in all_ops:
        if op.type == 'while':
            self.assertEqual(op.desc.attr(device_attr_name), "")

    execute(main_program, startup_program)
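# `get_vaild_warning_num` and `execute` are helpers defined elsewhere in this test
# file. A minimal sketch of the warning counter, assuming it simply counts the
# recorded warnings whose message contains the given substring (illustrative only):
def get_vaild_warning_num(warning, w):
    num = 0
    for i in range(len(w)):
        if warning in str(w[i].message):
            num += 1
    return num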
def test_increment(self):
    if fluid.core.is_compiled_with_cuda():
        paddle.enable_static()
        with paddle.fluid.device_guard("gpu:0"):
            x = paddle.fluid.layers.fill_constant([1], "float32", 0)
        with paddle.fluid.device_guard("cpu"):
            x = paddle.increment(x)
        exe = paddle.static.Executor(paddle.CUDAPlace(0))
        a, = exe.run(paddle.static.default_main_program(), fetch_list=[x])
        paddle.disable_static()
        self.assertEqual(a[0], 1)
def test_increment(self):
    if paddle.fluid.core.is_compiled_with_cuda():
        with paddle.fluid.device_guard("gpu:0"):
            x = paddle.fluid.layers.fill_constant([1], "float32", 0)
        with paddle.fluid.device_guard("cpu"):
            x = paddle.increment(x)
        exe = paddle.static.Executor(paddle.CUDAPlace(0))

        os.environ['FLAGS_USE_STANDALONE_EXECUTOR'] = '1'
        for i in range(10):
            a, = exe.run(paddle.static.default_main_program(), fetch_list=[x])
            self.assertEqual(a[0], 1)
        del os.environ['FLAGS_USE_STANDALONE_EXECUTOR']
def loop_body(i, loop_len, input_array):
    pre_input = paddle.tensor.array_read(array=input_array, i=i)
    mlp_while0 = MLPLayer(hidden_size=hidden_size,
                          intermediate_size=4 * hidden_size,
                          dropout_ratio=0.1,
                          initializer_range=0.02)
    mlp_while1 = MLPLayer(hidden_size=hidden_size,
                          intermediate_size=4 * hidden_size,
                          dropout_ratio=0.1,
                          initializer_range=0.02)
    output = mlp_while0(pre_input)
    cur_pred = mlp_while1(output)
    # Update the loop counter and write the new prediction back into the array.
    i = paddle.increment(x=i, value=1)
    paddle.tensor.array_write(cur_pred, array=input_array, i=i)
    return i, loop_len, input_array
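# A hedged usage sketch showing how a loop body like the one above is typically
# driven by paddle.static.nn.while_loop in a static-graph program. `MLPLayer`,
# `hidden_size`, and `loop_body` are assumed to come from the surrounding test;
# the tensor-array setup and the `loop_cond` function below are illustrative.
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
    i = paddle.zeros(shape=[1], dtype='int64')
    loop_len = paddle.full(shape=[1], dtype='int64', fill_value=3)

    # Seed the tensor array with the initial input at position 0.
    init_input = paddle.static.data(name='input', shape=[4, hidden_size],
                                    dtype='float32')
    input_array = paddle.tensor.array_write(init_input, i=i)

    def loop_cond(i, loop_len, input_array):
        return paddle.less_than(i, loop_len)

    i, loop_len, input_array = paddle.static.nn.while_loop(
        loop_cond, loop_body, [i, loop_len, input_array])

    # The final prediction is the last element written into the array.
    final_pred = paddle.tensor.array_read(array=input_array, i=i)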
def test_tensor_patch_method(self):
    paddle.disable_static()

    x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)

    x = paddle.to_tensor(x_np)
    y = paddle.to_tensor(y_np)
    z = paddle.to_tensor(z_np)

    a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
    b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

    # 1. Unary operation for Tensor
    self.assertEqual(x.dim(), 2)
    self.assertEqual(x.ndimension(), 2)
    self.assertEqual(x.ndim, 2)
    self.assertEqual(x.size, 6)
    self.assertEqual(x.numel(), 6)
    self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
    self.assertTrue(np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
    self.assertTrue(np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
    self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
    m = x.abs()
    self.assertTrue(np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()))
    self.assertTrue(np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy()))
    self.assertTrue(np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()))
    self.assertTrue(np.array_equal(x.floor().numpy(), paddle.floor(x).numpy()))
    self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
    self.assertTrue(np.array_equal(x.acos().numpy(), paddle.acos(x).numpy()))
    self.assertTrue(np.array_equal(x.asin().numpy(), paddle.asin(x).numpy()))
    self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
    self.assertTrue(np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()))
    self.assertTrue(np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()))
    self.assertTrue(np.array_equal(x.round().numpy(), paddle.round(x).numpy()))
    self.assertTrue(
        np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy()))
    self.assertTrue(np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
    self.assertTrue(np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
    self.assertTrue(np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()))
    self.assertTrue(np.array_equal(x.asinh().numpy(), paddle.asinh(x).numpy()))
    ### acosh(x) = nan, need to change input
    t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype)
    t = paddle.to_tensor(t_np)
    self.assertTrue(np.array_equal(t.acosh().numpy(), paddle.acosh(t).numpy()))
    self.assertTrue(np.array_equal(x.atanh().numpy(), paddle.atanh(x).numpy()))
    d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898],
                          [1.30058, 1.0688717, 1.4928783],
                          [1.0958099, 1.3724753, 1.8926544]])
    d = d.matmul(d.t())
    # ROCM not support cholesky
    if not fluid.core.is_compiled_with_rocm():
        self.assertTrue(
            np.array_equal(d.cholesky().numpy(), paddle.cholesky(d).numpy()))
    self.assertTrue(
        np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy()))
    self.assertTrue(
        np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy()))
    self.assertTrue(
        np.array_equal(x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy()))
    self.assertTrue(
        np.array_equal(x.expand([3, 2, 3]).numpy(),
                       paddle.expand(x, [3, 2, 3]).numpy()))
    self.assertTrue(
        np.array_equal(x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy()))
    self.assertTrue(np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy()))
    index = paddle.to_tensor([0, 1])
    self.assertTrue(
        np.array_equal(x.gather(index).numpy(), paddle.gather(x, index).numpy()))
    index = paddle.to_tensor([[0, 1], [1, 2]])
    self.assertTrue(
        np.array_equal(x.gather_nd(index).numpy(),
                       paddle.gather_nd(x, index).numpy()))
    self.assertTrue(
        np.array_equal(x.reverse([0, 1]).numpy(),
                       paddle.reverse(x, [0, 1]).numpy()))
    self.assertTrue(
        np.array_equal(a.reshape([3, 2]).numpy(),
                       paddle.reshape(a, [3, 2]).numpy()))
    self.assertTrue(
        np.array_equal(x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                       paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
    self.assertTrue(
        np.array_equal(x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy()))
    m = paddle.to_tensor(
        np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
    self.assertTrue(
        np.array_equal(m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy()))
    self.assertTrue(
        np.array_equal(m.squeeze([1, 2]).numpy(),
                       paddle.squeeze(m, [1, 2]).numpy()))
    m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
    self.assertTrue(
        np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy()))
    self.assertTrue(
        np.array_equal(m.unique(return_counts=True)[1],
                       paddle.unique(m, return_counts=True)[1]))
    self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
    self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
    self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
    self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
    m = paddle.to_tensor(1)
    self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
    m = x.abs()
    self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
    self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
    self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

    # 2. Binary operation
    self.assertTrue(
        np.array_equal(x.divide(y).numpy(), paddle.divide(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.matmul(y, True, False).numpy(),
                       paddle.matmul(x, y, True, False).numpy()))
    self.assertTrue(
        np.array_equal(x.norm(p='fro', axis=[0, 1]).numpy(),
                       paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
    self.assertTrue(np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy()))
    self.assertTrue(np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy()))
    m = x.expand([2, 2, 3])
    n = y.expand([2, 2, 3]).transpose([0, 2, 1])
    self.assertTrue(np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy()))
    self.assertTrue(
        np.array_equal(x.histogram(5, -1, 1).numpy(),
                       paddle.histogram(x, 5, -1, 1).numpy()))
    self.assertTrue(np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.greater_equal(y).numpy(),
                       paddle.greater_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.greater_than(y).numpy(),
                       paddle.greater_than(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.less_than(y).numpy(), paddle.less_than(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.allclose(y).numpy(), paddle.allclose(x, y).numpy()))
    m = x.expand([2, 2, 3])
    self.assertTrue(
        np.array_equal(x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy()))
    index = paddle.to_tensor([2, 1, 0])
    self.assertTrue(
        np.array_equal(a.scatter(index, b).numpy(),
                       paddle.scatter(a, index, b).numpy()))

    # 3. Bool tensor operation
    x = paddle.to_tensor([[True, False], [True, False]])
    y = paddle.to_tensor([[False, False], [False, True]])
    self.assertTrue(
        np.array_equal(x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_not(y).numpy(), paddle.logical_not(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_or(y).numpy(), paddle.logical_or(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_xor(y).numpy(), paddle.logical_xor(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
    a = paddle.to_tensor([[1, 2], [3, 4]])
    b = paddle.to_tensor([[4, 3], [2, 1]])
    self.assertTrue(
        np.array_equal(x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))

    x_np = np.random.randn(3, 6, 9, 7)
    x = paddle.to_tensor(x_np)
    x_T = x.T
    self.assertEqual(x_T.shape, [7, 9, 6, 3])
    self.assertTrue(np.array_equal(x_T.numpy(), x_np.T))

    self.assertTrue(inspect.ismethod(a.dot))
    self.assertTrue(inspect.ismethod(a.logsumexp))
    self.assertTrue(inspect.ismethod(a.multiplex))
    self.assertTrue(inspect.ismethod(a.prod))
    self.assertTrue(inspect.ismethod(a.scale))
    self.assertTrue(inspect.ismethod(a.stanh))
    self.assertTrue(inspect.ismethod(a.add_n))
    self.assertTrue(inspect.ismethod(a.max))
    self.assertTrue(inspect.ismethod(a.maximum))
    self.assertTrue(inspect.ismethod(a.min))
    self.assertTrue(inspect.ismethod(a.minimum))
    self.assertTrue(inspect.ismethod(a.floor_divide))
    self.assertTrue(inspect.ismethod(a.remainder))
    self.assertTrue(inspect.ismethod(a.floor_mod))
    self.assertTrue(inspect.ismethod(a.multiply))
    self.assertTrue(inspect.ismethod(a.logsumexp))
    self.assertTrue(inspect.ismethod(a.inverse))
    self.assertTrue(inspect.ismethod(a.log1p))
    self.assertTrue(inspect.ismethod(a.erf))
    self.assertTrue(inspect.ismethod(a.addmm))
    self.assertTrue(inspect.ismethod(a.clip))
    self.assertTrue(inspect.ismethod(a.trace))
    self.assertTrue(inspect.ismethod(a.kron))
    self.assertTrue(inspect.ismethod(a.isinf))
    self.assertTrue(inspect.ismethod(a.isnan))
    self.assertTrue(inspect.ismethod(a.concat))
    self.assertTrue(inspect.ismethod(a.broadcast_to))
    self.assertTrue(inspect.ismethod(a.scatter_nd_add))
    self.assertTrue(inspect.ismethod(a.scatter_nd))
    self.assertTrue(inspect.ismethod(a.shard_index))
    self.assertTrue(inspect.ismethod(a.chunk))
    self.assertTrue(inspect.ismethod(a.stack))
    self.assertTrue(inspect.ismethod(a.strided_slice))
    self.assertTrue(inspect.ismethod(a.unsqueeze))
    self.assertTrue(inspect.ismethod(a.unstack))
    self.assertTrue(inspect.ismethod(a.argmax))
    self.assertTrue(inspect.ismethod(a.argmin))
    self.assertTrue(inspect.ismethod(a.argsort))
    self.assertTrue(inspect.ismethod(a.masked_select))
    self.assertTrue(inspect.ismethod(a.topk))
    self.assertTrue(inspect.ismethod(a.index_select))
    self.assertTrue(inspect.ismethod(a.nonzero))
    self.assertTrue(inspect.ismethod(a.sort))
    self.assertTrue(inspect.ismethod(a.index_sample))
    self.assertTrue(inspect.ismethod(a.mean))
    self.assertTrue(inspect.ismethod(a.std))
    self.assertTrue(inspect.ismethod(a.numel))
def test_tensor_patch_method(self):
    paddle.disable_static()

    x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)

    x = paddle.to_tensor(x_np)
    y = paddle.to_tensor(y_np)
    z = paddle.to_tensor(z_np)

    a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
    b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

    # 1. Unary operation for Tensor
    self.assertEqual(x.dim(), 2)
    self.assertEqual(x.ndimension(), 2)
    self.assertEqual(x.ndim, 2)
    self.assertEqual(x.size(), [2, 3])
    self.assertTrue(
        np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy()))
    self.assertTrue(
        np.array_equal(x.logsigmoid().numpy(),
                       fluid.layers.logsigmoid(x).numpy()))
    self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
    self.assertTrue(np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
    self.assertTrue(np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
    self.assertTrue(
        np.array_equal(x.tanh_shrink().numpy(),
                       fluid.layers.tanh_shrink(x).numpy()))
    self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
    m = x.abs()
    self.assertTrue(np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()))
    self.assertTrue(np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy()))
    self.assertTrue(np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()))
    self.assertTrue(np.array_equal(x.floor().numpy(), paddle.floor(x).numpy()))
    self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
    self.assertTrue(np.array_equal(x.acos().numpy(), paddle.acos(x).numpy()))
    self.assertTrue(np.array_equal(x.asin().numpy(), paddle.asin(x).numpy()))
    self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
    self.assertTrue(np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()))
    self.assertTrue(np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()))
    self.assertTrue(np.array_equal(x.round().numpy(), paddle.round(x).numpy()))
    self.assertTrue(
        np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy()))
    self.assertTrue(np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
    self.assertTrue(
        np.array_equal(x.softplus().numpy(), fluid.layers.softplus(x).numpy()))
    self.assertTrue(
        np.array_equal(x.softsign().numpy(), fluid.layers.softsign(x).numpy()))
    self.assertTrue(np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
    self.assertTrue(np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()))
    m = paddle.to_tensor(np.random.uniform(1, 2, [3, 3]), 'float32')
    m = m.matmul(m.t())
    self.assertTrue(
        np.array_equal(m.cholesky().numpy(), paddle.cholesky(m).numpy()))
    self.assertTrue(
        np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy()))
    self.assertTrue(
        np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy()))
    self.assertTrue(
        np.array_equal(x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy()))
    self.assertTrue(
        np.array_equal(x.expand([3, 2, 3]).numpy(),
                       paddle.expand(x, [3, 2, 3]).numpy()))
    self.assertTrue(
        np.array_equal(x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy()))
    self.assertTrue(np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy()))
    index = paddle.to_tensor([0, 1])
    self.assertTrue(
        np.array_equal(x.gather(index).numpy(), paddle.gather(x, index).numpy()))
    index = paddle.to_tensor([[0, 1], [1, 2]])
    self.assertTrue(
        np.array_equal(x.gather_nd(index).numpy(),
                       paddle.gather_nd(x, index).numpy()))
    self.assertTrue(
        np.array_equal(x.reverse([0, 1]).numpy(),
                       paddle.reverse(x, [0, 1]).numpy()))
    self.assertTrue(
        np.array_equal(a.reshape([3, 2]).numpy(),
                       paddle.reshape(a, [3, 2]).numpy()))
    self.assertTrue(
        np.array_equal(x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                       paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
    self.assertTrue(
        np.array_equal(x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy()))
    m = paddle.to_tensor(
        np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
    self.assertTrue(
        np.array_equal(m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy()))
    self.assertTrue(
        np.array_equal(m.squeeze([1, 2]).numpy(),
                       paddle.squeeze(m, [1, 2]).numpy()))
    m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
    self.assertTrue(
        np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy()))
    self.assertTrue(
        np.array_equal(m.unique_with_counts()[2],
                       paddle.unique_with_counts(m)[2]))
    self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
    self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
    self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
    self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
    m = paddle.to_tensor(1)
    self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
    m = x.abs()
    self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
    self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
    self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

    # 2. Binary operation
    self.assertTrue(
        np.array_equal(x.matmul(y, True, False).numpy(),
                       paddle.matmul(x, y, True, False).numpy()))
    self.assertTrue(
        np.array_equal(x.norm(p='fro', axis=[0, 1]).numpy(),
                       paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
    self.assertTrue(np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy()))
    self.assertTrue(np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy()))
    m = x.expand([2, 2, 3])
    n = y.expand([2, 2, 3]).transpose([0, 2, 1])
    self.assertTrue(np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy()))
    self.assertTrue(
        np.array_equal(x.histogram(5, -1, 1).numpy(),
                       paddle.histogram(x, 5, -1, 1).numpy()))
    self.assertTrue(np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.greater_equal(y).numpy(),
                       paddle.greater_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.greater_than(y).numpy(),
                       paddle.greater_than(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.less_than(y).numpy(), paddle.less_than(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.allclose(y).numpy(), paddle.allclose(x, y).numpy()))
    m = x.expand([2, 2, 3])
    self.assertTrue(
        np.array_equal(x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy()))
    index = paddle.to_tensor([2, 1, 0])
    self.assertTrue(
        np.array_equal(a.scatter(index, b).numpy(),
                       paddle.scatter(a, index, b).numpy()))

    # 3. Bool tensor operation
    x = paddle.to_tensor([[True, False], [True, False]])
    y = paddle.to_tensor([[False, False], [False, True]])
    self.assertTrue(
        np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy()))
    self.assertTrue(
        np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_not(y).numpy(), paddle.logical_not(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_or(y).numpy(), paddle.logical_or(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_xor(y).numpy(), paddle.logical_xor(x, y).numpy()))
    self.assertTrue(
        np.array_equal(x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))