def test_nll_loss_function_reduction_imperative_not_sum_mean_none():
    with fluid.dygraph.guard():
        x_np = np.random.random(size=(5, 3)).astype(np.float64)
        label_np = np.random.randint(0, 3, size=(5, )).astype(np.int64)
        x = paddle.to_variable(x_np)
        label = paddle.to_variable(label_np)
        # reduction must be 'sum', 'mean' or 'none'; an empty string is
        # invalid and should trigger an error
        res = paddle.nn.functional.nll_loss(x, label, reduction='')
def test_transformer(self):
    batch_size, d_model, n_head, dim_feedforward, dropout, _, _, source_length, target_length = generate_basic_params(
        mode="decoder_layer")
    # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8
    with fluid.dygraph.guard(fluid.CPUPlace()):
        transformer = Transformer(
            d_model, n_head, dim_feedforward=dim_feedforward, dropout=dropout)
        src = paddle.to_variable(
            np.random.rand(batch_size, source_length,
                           d_model).astype("float32"))
        tgt = paddle.to_variable(
            np.random.rand(batch_size, target_length,
                           d_model).astype("float32"))
        src_mask = np.zeros((batch_size, n_head, source_length,
                             source_length)).astype("float32")
        src_mask[0][0][0][0] = -np.inf
        src_mask = paddle.to_variable(src_mask)
        tgt_mask = np.zeros((batch_size, n_head, target_length,
                             target_length)).astype("float32")
        tgt_mask[0][0][0][0] = -1e9
        memory_mask = np.zeros((batch_size, n_head, target_length,
                                source_length)).astype("float32")
        memory_mask[0][0][0][0] = -1e9
        tgt_mask, memory_mask = paddle.to_variable(
            tgt_mask), paddle.to_variable(memory_mask)
        trans_output = transformer(src, tgt, src_mask, tgt_mask, memory_mask)
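# The masks in the test above follow the additive-attention convention: 0
# keeps a position, while -inf or a large negative value such as -1e9 drives
# its post-softmax weight to (effectively) zero. A tiny standalone numpy
# demo of that convention, independent of the test itself:
logits = np.array([1.0, 2.0, 3.0], dtype="float32")
mask = np.array([0.0, 0.0, -1e9], dtype="float32")
masked = logits + mask
probs = np.exp(masked - masked.max())
probs /= probs.sum()
print(probs)  # the masked third position gets ~0 attention weight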
def test_functional_paddle_imperative_dygraph_context(self):
    self.assertFalse(paddle.in_dynamic_mode())
    paddle.disable_static()
    self.assertTrue(paddle.in_dynamic_mode())

    # first dygraph pass
    np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    var_inp = paddle.to_variable(np_inp)
    mlp = MLP(input_size=2)
    out = mlp(var_inp)
    dy_out1 = out.numpy()
    out.backward()
    dy_grad1 = mlp._linear1.weight.gradient()
    paddle.enable_static()
    self.assertFalse(paddle.in_dynamic_mode())

    # second pass after re-entering dygraph mode
    paddle.disable_static()
    self.assertTrue(paddle.in_dynamic_mode())
    var_inp = paddle.to_variable(np_inp)
    mlp = MLP(input_size=2)
    out = mlp(var_inp)
    dy_out2 = out.numpy()
    out.backward()
    dy_grad2 = mlp._linear1.weight.gradient()
    paddle.enable_static()
    self.assertFalse(paddle.in_dynamic_mode())

    # both passes should produce identical outputs and gradients
    self.assertTrue(np.array_equal(dy_out1, dy_out2))
    self.assertTrue(np.array_equal(dy_grad1, dy_grad2))
def test_NNFunctionalMseLoss_none(self):
    for dim in [[10, 10], [2, 10, 10], [3, 3, 10, 10]]:
        input_np = np.random.uniform(0.1, 0.5, dim).astype("float32")
        target_np = np.random.uniform(0.1, 0.5, dim).astype("float32")

        paddle.enable_static()
        prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        place = (paddle.CUDAPlace(0)
                 if core.is_compiled_with_cuda() else paddle.CPUPlace())
        with paddle.static.program_guard(prog, startup_prog):
            input = paddle.data(name='input', shape=dim, dtype='float32')
            target = paddle.data(name='target', shape=dim, dtype='float32')
            mse_loss = paddle.nn.functional.mse_loss(input, target, 'none')

            exe = paddle.static.Executor(place)
            exe.run(startup_prog)
            static_result = exe.run(
                prog,
                feed={"input": input_np,
                      "target": target_np},
                fetch_list=[mse_loss])

        paddle.disable_static()
        dy_ret = paddle.nn.functional.mse_loss(
            paddle.to_variable(input_np), paddle.to_variable(target_np),
            'none')
        dy_result = dy_ret.numpy()

        sub = input_np - target_np
        expected = sub * sub
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))
        # 'none' reduction keeps the elementwise shape of the inputs
        self.assertEqual(list(dy_result.shape), dim)
def test_x_dim_imperative_lt_2():
    with fluid.dygraph.guard():
        x_np = np.random.random(size=(5, )).astype(np.float64)
        label_np = np.random.randint(0, 10, size=(5, )).astype(np.int64)
        x = paddle.to_variable(x_np)
        label = paddle.to_variable(label_np)
        nll_loss = paddle.nn.loss.NLLLoss()
        # NLLLoss expects an input with at least 2 dimensions, so a 1-D
        # input should trigger an error
        res = nll_loss(x, label)
def test_dynamic_api(self):
    paddle.disable_static()
    np_x = np.array([10, 10]).astype('float64')
    x = paddle.to_variable(self.input_x)
    y = paddle.to_variable(self.input_y)
    z = paddle.maximum(x, y)
    np_z = z.numpy()
    z_expected = np.array(np.maximum(self.input_x, self.input_y))
    self.assertEqual((np_z == z_expected).all(), True)
def run_kl_loss(self, reduction, shape=(5, 20)):
    x = np.random.uniform(-10, 10, shape).astype('float64')
    target = np.random.uniform(-10, 10, shape).astype('float64')
    gt_loss = kldiv_loss(x, target, reduction)

    with paddle.fluid.dygraph.guard():
        kldiv_criterion = paddle.nn.KLDivLoss(reduction)
        pred_loss = kldiv_criterion(
            paddle.to_variable(x), paddle.to_variable(target))
        self.assertTrue(np.allclose(pred_loss.numpy(), gt_loss))
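# For reference, a minimal numpy sketch of the `kldiv_loss` ground-truth
# helper used above (the real helper is defined elsewhere in the file, so
# this is an assumption rather than its verbatim code). KLDivLoss treats `x`
# as log-probabilities: the elementwise term is target * (log(target) - x).
def kldiv_loss_reference(x, target, reduction):
    with np.errstate(invalid='ignore', divide='ignore'):
        # terms with non-positive targets are defined as 0
        loss = np.where(target > 0, target * (np.log(target) - x),
                        np.zeros_like(x))
    if reduction == 'batchmean':
        return loss.sum() / x.shape[0]  # sum normalized by batch size
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss  # 'none': keep the elementwise shape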
def test_broadcast_axis(self):
    paddle.disable_static()
    np_x = np.random.rand(5, 4, 3, 2).astype("float64")
    np_y = np.random.rand(4, 3).astype("float64")

    x = paddle.to_variable(self.input_x)
    y = paddle.to_variable(self.input_y)
    result_1 = paddle.maximum(x, y, axis=1)
    result_2 = paddle.maximum(x, y, axis=-2)
    self.assertEqual((result_1.numpy() == result_2.numpy()).all(), True)
def test_with_initial_state(self):
    rnn1 = self.rnn1
    rnn2 = self.rnn2

    x = np.random.randn(4, 16)
    prev_h = np.random.randn(4, 32)

    y1, h1 = rnn1(x, prev_h)
    y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
    np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_dygraph(x_np, y_np, p=2.0, epsilon=1e-6, keepdim=False):
    paddle.disable_static()
    x = paddle.to_variable(x_np)
    y = paddle.to_variable(y_np)
    dist = paddle.nn.layer.distance.PairwiseDistance(
        p=p, epsilon=epsilon, keepdim=keepdim)
    distance = dist(x, y)
    dygraph_ret = distance.numpy()
    paddle.enable_static()
    return dygraph_ret
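# A rough numpy counterpart to the helper above, assuming 2-D inputs of
# shape (N, D) with the p-norm taken over the last axis and epsilon added to
# the difference for numerical stability (both details are assumptions about
# PairwiseDistance, not taken from the snippet itself):
def pairwise_distance_reference(x_np, y_np, p=2.0, epsilon=1e-6,
                                keepdim=False):
    diff = x_np - y_np + epsilon  # epsilon keeps the norm stable at x == y
    return np.linalg.norm(diff, ord=p, axis=-1, keepdims=keepdim)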
def test_with_initial_state(self):
    rnn1 = self.rnn1
    rnn2 = self.rnn2

    x = np.random.randn(12, 4, 16)
    if not self.time_major:
        x = np.transpose(x, [1, 0, 2])
    prev_h = np.random.randn(2 * self.num_directions, 4, 32)

    y1, h1 = rnn1(x, prev_h)
    y2, h2 = rnn2(paddle.to_variable(x), paddle.to_variable(prev_h))
    np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
    np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def set_input(self, input):
    """Unpack input data from the dataloader and perform necessary
    pre-processing steps.

    Parameters:
        input (dict): includes the data itself and its metadata information.

    The option 'direction' can be used to swap images in domain A and
    domain B.
    """
    AtoB = self.opt.dataset.train.direction == 'AtoB'
    self.real_A = paddle.to_variable(input['A' if AtoB else 'B'])
    self.real_B = paddle.to_variable(input['B' if AtoB else 'A'])
    self.image_paths = input['A_paths' if AtoB else 'B_paths']
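# A minimal sketch of the batch dict this method expects; the key names come
# from the method body, while the shapes, dtype, paths, and the `model`
# instance are illustrative assumptions:
sample_batch = {
    'A': np.zeros((1, 3, 256, 256), dtype='float32'),  # domain-A image batch
    'B': np.zeros((1, 3, 256, 256), dtype='float32'),  # domain-B image batch
    'A_paths': ['datasets/a/0001.png'],
    'B_paths': ['datasets/b/0001.png'],
}
model.set_input(sample_batch)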
def test_imperative(self):
    in1 = np.array([[1, 2, 3], [4, 5, 6]])
    in2 = np.array([[11, 12, 13], [14, 15, 16]])
    in3 = np.array([[21, 22], [23, 24]])

    paddle.disable_static()
    x1 = paddle.to_variable(in1)
    x2 = paddle.to_variable(in2)
    x3 = paddle.to_variable(in3)
    out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
    out2 = paddle.concat(x=[x1, x2], axis=0)
    np_out1 = np.concatenate([in1, in2, in3], axis=-1)
    np_out2 = np.concatenate([in1, in2], axis=0)
    paddle.enable_static()

    self.assertEqual((out1.numpy() == np_out1).all(), True)
    self.assertEqual((out2.numpy() == np_out2).all(), True)
def test_api_1(self):
    paddle.disable_static(self.place)
    var_x = paddle.to_variable(self.input_data)
    out = paddle.sort(var_x, axis=-1)
    self.assertEqual(
        (np.sort(self.input_data, axis=-1) == out.numpy()).all(), True)
    paddle.enable_static()
def run_cases(self):
    data_np = np.arange(12).reshape(3, 4)
    data = to_variable(data_np)

    y = paddle.cumsum(data)
    z = np.cumsum(data_np)
    self.assertTrue(np.array_equal(z, y.numpy()))

    y = paddle.cumsum(data, axis=0)
    z = np.cumsum(data_np, axis=0)
    self.assertTrue(np.array_equal(z, y.numpy()))

    y = paddle.cumsum(data, axis=-1)
    z = np.cumsum(data_np, axis=-1)
    self.assertTrue(np.array_equal(z, y.numpy()))

    y = paddle.cumsum(data, dtype='float64')
    self.assertTrue(y.dtype == core.VarDesc.VarType.FP64)

    y = paddle.cumsum(data, dtype=np.int32)
    self.assertTrue(y.dtype == core.VarDesc.VarType.INT32)

    y = paddle.cumsum(data, axis=-2)
    z = np.cumsum(data_np, axis=-2)
    self.assertTrue(np.array_equal(z, y.numpy()))
def test_dynamic_graph(self):
    for use_cuda in ([False, True]
                     if core.is_compiled_with_cuda() else [False]):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
        paddle.disable_static(place=place)
        x = paddle.to_variable(self.x_np)

        out_1 = paddle.nn.functional.adaptive_max_pool2d(
            x=x, return_indices=False, output_size=[3, 3])

        out_2 = paddle.nn.functional.adaptive_max_pool2d(x=x, output_size=5)

        out_3 = paddle.nn.functional.adaptive_max_pool2d(
            x=x, output_size=[2, 5])

        #out_4 = paddle.nn.functional.adaptive_max_pool2d(
        #    x=x, output_size=[3, 3], data_format="NHWC")

        out_5 = paddle.nn.functional.adaptive_max_pool2d(
            x=x, output_size=[None, 3])

        assert np.allclose(out_1.numpy(), self.res_1_np)
        assert np.allclose(out_2.numpy(), self.res_2_np)
        assert np.allclose(out_3.numpy(), self.res_3_np)
        #assert np.allclose(out_4.numpy(), self.res_4_np)
        assert np.allclose(out_5.numpy(), self.res_5_np)
def test_dygraph_2(self):
    paddle.disable_static()
    shape = [12, 13]
    axis = 0
    eps = 1e-6
    np.random.seed(1)
    np_x1 = np.random.rand(*shape).astype(np.float32)
    np_x2 = np.random.rand(*shape).astype(np.float32)
    np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

    tensor_x1 = paddle.to_variable(np_x1)
    tensor_x2 = paddle.to_variable(np_x2)
    y = F.cosine_similarity(tensor_x1, tensor_x2, axis=axis, eps=eps)

    self.assertTrue(np.allclose(y.numpy(), np_out))
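# For context, a plausible numpy sketch of the `_get_numpy_out` reference
# used above (the real helper lives elsewhere in the test class, so treat
# this as an assumption). Cosine similarity is the dot product along `axis`
# normalized by the vector norms, with `eps` guarding division by zero:
def cosine_similarity_reference(x1, x2, axis=1, eps=1e-8):
    w12 = np.sum(x1 * x2, axis=axis)  # <x1, x2>
    w1 = np.sqrt(np.sum(x1 * x1, axis=axis))  # ||x1||
    w2 = np.sqrt(np.sum(x2 * x2, axis=axis))  # ||x2||
    return w12 / np.maximum(w1 * w2, eps)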
def test_dynamic_graph(self):
    for use_cuda in ([False, True]
                     if core.is_compiled_with_cuda() else [False]):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
        paddle.disable_static(place=place)
        x = paddle.to_variable(self.x_np)

        out_1 = paddle.nn.functional.adaptive_avg_pool3d(
            x=x, output_size=[3, 3, 3])

        out_2 = paddle.nn.functional.adaptive_avg_pool3d(x=x, output_size=5)

        out_3 = paddle.nn.functional.adaptive_avg_pool3d(
            x=x, output_size=[2, 3, 5])

        out_4 = paddle.nn.functional.adaptive_avg_pool3d(
            x=x, output_size=[3, 3, 3], data_format="NDHWC")

        out_5 = paddle.nn.functional.adaptive_avg_pool3d(
            x=x, output_size=[None, 3, None])

        out_6 = paddle.nn.functional.interpolate(
            x=x, mode="area", size=[2, 3, 5])

        assert np.allclose(out_1.numpy(), self.res_1_np)
        assert np.allclose(out_2.numpy(), self.res_2_np)
        assert np.allclose(out_3.numpy(), self.res_3_np)
        assert np.allclose(out_4.numpy(), self.res_4_np)
        assert np.allclose(out_5.numpy(), self.res_5_np)
        # area interpolation to the same size should match adaptive pooling
        assert np.allclose(out_6.numpy(), self.res_3_np)
def test_dygraph(self):
    paddle.disable_static()
    a = np.random.rand(3, 3)
    a_t = np.transpose(a, [1, 0])
    # build a symmetric positive-definite matrix so Cholesky is well-defined
    x_data = np.matmul(a, a_t) + 1e-03
    x = paddle.to_variable(x_data)
    out = paddle.cholesky(x, upper=False)
def test_dynamic_graph(self):
    for use_cuda in ([False, True]
                     if core.is_compiled_with_cuda() else [False]):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
        paddle.disable_static(place=place)
        x = paddle.to_variable(self.x_np)

        adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[3, 3])
        out_1 = adaptive_avg_pool(x=x)

        adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=5)
        out_2 = adaptive_avg_pool(x=x)

        adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(output_size=[2, 5])
        out_3 = adaptive_avg_pool(x=x)

        adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(
            output_size=[3, 3], data_format="NHWC")
        out_4 = adaptive_avg_pool(x=x)

        adaptive_avg_pool = paddle.nn.AdaptiveAvgPool2d(
            output_size=[None, 3])
        out_5 = adaptive_avg_pool(x=x)

        assert np.allclose(out_1.numpy(), self.res_1_np)
        assert np.allclose(out_2.numpy(), self.res_2_np)
        assert np.allclose(out_3.numpy(), self.res_3_np)
        assert np.allclose(out_4.numpy(), self.res_4_np)
        assert np.allclose(out_5.numpy(), self.res_5_np)
def run_dygraph(x_np, op_str, use_gpu=True):
    place = paddle.CPUPlace()
    if use_gpu and fluid.core.is_compiled_with_cuda():
        place = paddle.CUDAPlace(0)
    paddle.disable_static(place)
    x = paddle.to_variable(x_np)
    dygraph_result = getattr(paddle.tensor, op_str)(x)
    return dygraph_result
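# A hedged usage sketch for the generic runner above: `op_str` names a unary
# op looked up on paddle.tensor. The ops shown here (isfinite, isnan, isinf)
# do exist there, but which ops the surrounding suite actually exercises is
# an assumption.
x_np = np.array([1.0, float('nan'), float('inf')], dtype='float32')
for op_str in ['isfinite', 'isnan', 'isinf']:
    print(op_str, run_dygraph(x_np, op_str, use_gpu=False).numpy())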
def test_imperative_api(self):
    paddle.disable_static()
    np_x = np.array([10, 10]).astype('float64')
    x = paddle.to_variable(np_x)
    z = paddle.min(x, axis=0)
    np_z = z.numpy()
    z_expected = np.array(np.min(np_x, axis=0))
    self.assertEqual((np_z == z_expected).all(), True)
def test_out(self):
    place = (paddle.CUDAPlace(0)
             if core.is_compiled_with_cuda() else paddle.CPUPlace())
    paddle.disable_static(place)
    x1 = paddle.arange(0, 5, 1)
    x2 = paddle.tensor.arange(5)
    x3 = paddle.tensor.creation.arange(5)

    start = paddle.to_variable(np.array([0], 'float32'))
    end = paddle.to_variable(np.array([5], 'float32'))
    step = paddle.to_variable(np.array([1], 'float32'))
    x4 = paddle.arange(start, end, step, 'int64')
    paddle.enable_static()

    expected_data = np.arange(0, 5, 1).astype(np.int64)
    for i in [x1, x2, x3, x4]:
        self.assertEqual((i.numpy() == expected_data).all(), True)
def test_clip_dygraph(self):
    place = (fluid.CUDAPlace(0)
             if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace())
    paddle.disable_static(place)
    data_shape = [1, 9, 9, 4]
    data = np.random.random(data_shape).astype('float32')
    images = paddle.to_variable(data, dtype='float32')
    v_min = paddle.to_variable(np.array([0.2], dtype=np.float32))
    v_max = paddle.to_variable(np.array([0.8], dtype=np.float32))

    out_1 = paddle.clip(images, min=0.2, max=0.8)
    out_2 = paddle.clip(images, min=0.2, max=0.9)
    out_3 = paddle.clip(images, min=v_min, max=v_max)

    self.assertTrue(np.allclose(out_1.numpy(), data.clip(0.2, 0.8)))
    self.assertTrue(np.allclose(out_2.numpy(), data.clip(0.2, 0.9)))
    self.assertTrue(np.allclose(out_3.numpy(), data.clip(0.2, 0.8)))
def test_dygraph_4(self):
    paddle.disable_static()
    shape1 = [23, 12, 1]
    shape2 = [23, 1, 10]
    axis = 2
    eps = 1e-6
    np.random.seed(1)
    np_x1 = np.random.rand(*shape1).astype(np.float32)
    np_x2 = np.random.rand(*shape2).astype(np.float32)
    np_out = self._get_numpy_out(np_x1, np_x2, axis=axis, eps=eps)

    cos_sim_func = nn.CosineSimilarity(axis=axis, eps=eps)
    tensor_x1 = paddle.to_variable(np_x1)
    tensor_x2 = paddle.to_variable(np_x2)
    y = cos_sim_func(tensor_x1, tensor_x2)

    self.assertTrue(np.allclose(y.numpy(), np_out))
def test_with_zero_state(self):
    rnn1 = self.rnn1
    rnn2 = self.rnn2

    x = np.random.randn(4, 16)

    y1, h1 = rnn1(x)
    y2, h2 = rnn2(paddle.to_variable(x))
    np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
def test_encoder(self):
    batch_size, d_model, n_head, dim_feedforward, dropout, attn_dropout, act_dropout, sequence_length = generate_basic_params(
        mode="encoder_layer")

    src = np.random.rand(batch_size, sequence_length,
                         d_model).astype("float32")
    src_mask = np.zeros((batch_size, n_head, sequence_length,
                         sequence_length)).astype("float32")
    src_mask[0][0][0][0] = -np.inf

    with fluid.dygraph.guard(fluid.CPUPlace()):
        encoder_layer = TransformerEncoderLayer(d_model, n_head,
                                                dim_feedforward, dropout)
        num_layers = 6
        encoder = TransformerEncoder(encoder_layer, num_layers)
        # src, src_mask
        enc_output = encoder(
            paddle.to_variable(src), paddle.to_variable(src_mask))
def test_api(self):
    with fluid.dygraph.guard():
        np_x = np.random.random([12, 14]).astype("float32")
        x = paddle.to_variable(np_x)

        positive_2 = np.array([2]).astype("int32")
        positive_2 = paddle.to_variable(positive_2)

        repeat_times = np.array([2, 3]).astype("int32")
        repeat_times = paddle.to_variable(repeat_times)

        out_1 = paddle.tile(x, repeat_times=[2, 3])
        out_2 = paddle.tile(x, repeat_times=[positive_2, 3])
        out_3 = paddle.tile(x, repeat_times=repeat_times)

        assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3)))
        assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3)))
        assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3)))
def test_alias(self):
    paddle.disable_static(self.place)
    x = paddle.to_variable(self.x)
    out1 = paddle.logsumexp(x)
    out2 = paddle.tensor.logsumexp(x)
    out3 = paddle.tensor.math.logsumexp(x)
    out_ref = ref_logsumexp(self.x)
    for out in [out1, out2, out3]:
        self.assertTrue(np.allclose(out.numpy(), out_ref))
    paddle.enable_static()
def run_imperative(self):
    input = paddle.to_variable(self.input_np)
    label = paddle.to_variable(self.label_np)

    dy_result = paddle.nn.functional.l1_loss(input, label)
    expected = np.mean(np.abs(self.input_np - self.label_np))
    self.assertTrue(np.allclose(dy_result.numpy(), expected))
    # 'mean' (the default) reduces to a single value
    self.assertEqual(dy_result.shape, [1])

    dy_result = paddle.nn.functional.l1_loss(input, label, reduction='sum')
    expected = np.sum(np.abs(self.input_np - self.label_np))
    self.assertTrue(np.allclose(dy_result.numpy(), expected))
    # 'sum' also reduces to a single value
    self.assertEqual(dy_result.shape, [1])

    dy_result = paddle.nn.functional.l1_loss(input, label, reduction='none')
    expected = np.abs(self.input_np - self.label_np)
    self.assertTrue(np.allclose(dy_result.numpy(), expected))
    # 'none' keeps the elementwise shape of the inputs
    self.assertEqual(dy_result.shape, [10, 10, 5])