def func_empty_grad(self):
    """Check gradient access on a tensor that has never been backpropagated.

    Two scenarios are exercised:
      1. A freshly created tensor must report ``None`` from ``gradient()``,
         and (in legacy dygraph mode only) ``clear_gradient()`` on it is
         expected to raise ``core.EnforceNotMet``.
      2. Calling ``gradient()`` on a block-created variable is expected to
         raise ``ValueError``.
    """
    with fluid.dygraph.guard():
        x = np.ones([2, 2], np.float32)
        new_var = paddle.to_tensor(x)
        # No backward pass has run, so there is no gradient yet.
        self.assertIsNone(new_var.gradient())
        # TODO(jiabin): Support clear_gradient in eager mode later and remove this if statement
        if not _in_eager_mode():
            # NOTE(review): if clear_gradient() does NOT raise, this check is
            # silently skipped — the try/except asserts the type only when an
            # exception actually occurs.
            try:
                new_var.clear_gradient()
            except Exception as e:
                assert type(e) == core.EnforceNotMet
    with fluid.dygraph.guard():
        cur_program = fluid.Program()
        cur_block = cur_program.current_block()
        # Normally, we don't allow tensor with -1 shape being created in dygraph mode, this test is not good.
        if not _in_eager_mode():
            new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], dtype='float32')
        else:
            # Eager mode forbids -1 (unknown) dimensions, so use a concrete shape.
            new_variable = cur_block.create_var(name="X", shape=[1, 23, 48], dtype='float32')
        # Block-created variables are not real dygraph tensors; asking for a
        # gradient is expected to fail with ValueError (again, silently
        # skipped if no exception is raised).
        try:
            new_variable.gradient()
        except Exception as e:
            assert type(e) == ValueError
def func_tensor_from_numpy(self):
    """Verify to_variable copy semantics for numpy inputs.

    ``zero_copy=True`` is currently unsupported and must emit a warning;
    ``zero_copy=False`` must deep-copy, so later mutation of the source
    array is not reflected in the tensor.
    """
    src = np.array([[2, 3, 1]]).astype('float32')
    with fluid.dygraph.guard(fluid.CPUPlace()):
        # Requesting zero_copy should raise a discard warning.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            var = fluid.dygraph.to_variable(src, zero_copy=True)
            assert "Currently, zero_copy is not supported, and it will be discarded." in str(
                caught[-1].message)
        # Zero-copy behavior is temporarily disabled; the original shared-buffer
        # checks are kept here for when it returns:
        # var = fluid.dygraph.to_variable(src, zero_copy=True)
        # self.assertTrue(np.array_equal(var.numpy(), src))
        # src[0][0] = 4
        # self.assertEqual(src[0][0], 4)
        # self.assertEqual(var[0][0].numpy()[0], 4)
        # self.assertTrue(np.array_equal(var.numpy(), src))
        copied = fluid.dygraph.to_variable(src, zero_copy=False)
        self.assertTrue(np.array_equal(copied.numpy(), src))
        # Mutate the source; a deep-copied tensor must not observe the change.
        src[0][0] = -1
        self.assertEqual(src[0][0], -1)
        if _in_eager_mode():
            # eager_mode, copied is EagerTensor, is not subscriptable
            # TODO(wuweilong): to support slice in eager mode later
            self.assertNotEqual(copied.numpy()[0][0], -1)
        else:
            self.assertNotEqual(copied[0][0].numpy()[0], -1)
        self.assertFalse(np.array_equal(copied.numpy(), src))
def func_create_varbase(self):
    """Construct tensors through every supported constructor overload.

    Covers positional numpy input, numpy-only input, ``paddle.to_tensor``,
    and construction from a ``fluid.Tensor`` (LoDTensor), for both the
    eager EagerTensor path and the legacy VarBase path.
    """
    ones_np = np.ones([2, 2], np.float32)
    zeros_np = np.zeros([3, 3], np.float32)
    lod_t = fluid.Tensor()
    lod_t.set(ones_np, fluid.CPUPlace())
    if _in_eager_mode():
        # TODO(jiabin): Support Kwargs and uncomment these tests
        # eager_a = fluid.core.eager.EagerTensor(value=ones_np, place=fluid.core.CPUPlace())
        eager_b = fluid.core.eager.EagerTensor(zeros_np, fluid.core.CPUPlace())
        eager_c = paddle.to_tensor(ones_np)
        eager_d = fluid.core.eager.EagerTensor(zeros_np)
        # eager_e = fluid.core.eager.EagerTensor(value=ones_np)
        # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
        eager_f = fluid.core.eager.EagerTensor(lod_t)
        # self.assertTrue(np.array_equal(ones_np, eager_a.numpy()))
        self.assertTrue(np.array_equal(zeros_np, eager_b.numpy()))
        self.assertTrue(np.array_equal(ones_np, eager_c.numpy()))
        self.assertTrue(np.array_equal(zeros_np, eager_d.numpy()))
        # self.assertTrue(np.array_equal(ones_np, eager_e.numpy()))
        self.assertTrue(np.array_equal(ones_np, eager_f.numpy()))
    else:
        # Legacy VarBase supports keyword construction as well.
        legacy_a = fluid.core.VarBase(value=ones_np, place=fluid.core.CPUPlace())
        legacy_b = fluid.core.VarBase(zeros_np, fluid.core.CPUPlace())
        legacy_c = paddle.to_tensor(ones_np)
        legacy_d = fluid.core.VarBase(zeros_np)
        legacy_e = fluid.core.VarBase(value=ones_np)
        legacy_f = fluid.core.VarBase(lod_t)
        self.assertTrue(np.array_equal(ones_np, legacy_a.numpy()))
        self.assertTrue(np.array_equal(zeros_np, legacy_b.numpy()))
        self.assertTrue(np.array_equal(ones_np, legacy_c.numpy()))
        self.assertTrue(np.array_equal(zeros_np, legacy_d.numpy()))
        self.assertTrue(np.array_equal(ones_np, legacy_e.numpy()))
        self.assertTrue(np.array_equal(ones_np, legacy_f.numpy()))
def test__test_eager_guard(self):
    """Entering _test_eager_guard with a tracer must enable eager mode."""
    with _test_eager_guard(paddle.fluid.dygraph.tracer.Tracer()):
        self.assertTrue(_in_eager_mode())