def test_forward_backward_single_tensor_output(self):
    """Route one tensor through select_output -> select_input and check grads.

    Feeds a two-element float vector through branch 0 of the selector pair,
    takes the mean of the selected result, and verifies that (a) the forward
    output equals the input and (b) d(mean)/dx is 0.5 per element.
    """
    main_program = Program()
    with program_guard(main_program):
        x = layers.data(name='x', shape=[2], dtype='float32')
        # Gradients must flow back to x so we can fetch x.grad_name below.
        x.stop_gradient = False
        mask = layers.data(name='mask', shape=[1], dtype='int32')

        # Intermediate variable that select_output writes into and
        # select_input reads back from.
        out = main_program.current_block().create_var(
            dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR)

        select_output(x, out, mask)
        y = select_input(out, mask)
        mean = layers.mean(y)
        append_backward(mean)

    if core.is_compiled_with_cuda():
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = Executor(place)

    x_value = np.asarray([1.3, -1.4]).astype(np.float32)
    mask_value = np.asarray([0]).astype(np.int32)
    results = exe.run(main_program,
                      feed={'x': x_value, 'mask': mask_value},
                      fetch_list=[y.name, x.grad_name])
    # mean over 2 elements => gradient is 1/2 for each input element.
    expected_grad = np.asarray([0.5, 0.5]).astype(np.float32)
    self.assertTrue(np.allclose(np.asarray(results[0]), x_value))
    self.assertTrue(np.allclose(np.asarray(results[1]), expected_grad))
# Example 2
 def test_outputs_type():
     """Call select_output with a bare Variable (not a list) as outputs.

     NOTE(review): `in1`, `out1`, and `mask_int32` come from the enclosing
     scope — presumably this closure is passed to assertRaises to check
     that the outputs-must-be-a-list type validation fires; confirm against
     the surrounding test method.
     """
     select_output(in1, out1, mask=mask_int32)
# Example 3
 def test_mask_type():
     """Call select_output with a plain int (not a Variable) as mask.

     NOTE(review): `in1` and `out1` come from the enclosing scope —
     presumably wrapped in assertRaises to check the mask type validation;
     confirm against the surrounding test method.
     """
     select_output(in1, [out1], mask=1)
# Example 4
 def test_mask_dtype():
     """Call select_output with a float32 mask instead of an int32 one.

     NOTE(review): `in1`, `out1`, and `mask_float32` come from the
     enclosing scope — presumably wrapped in assertRaises to check the
     mask dtype validation; confirm against the surrounding test method.
     """
     select_output(in1, [out1], mask=mask_float32)
# Example 5
 def test_input_type():
     """Call select_output with a plain int (not a Variable) as input.

     NOTE(review): `out1` and `mask_int32` come from the enclosing scope —
     presumably wrapped in assertRaises to check the input type validation;
     confirm against the surrounding test method.
     """
     select_output(1, [out1], mask_int32)