def test_forward(self):
        data = layers.data(name='X', shape=[1], dtype='float32')
        data.stop_gradient = False
        cond = layers.ConditionalBlock(inputs=[data])
        out = layers.create_tensor(dtype='float32')
        with cond.block():
            hidden = layers.fc(input=data, size=10)
            layers.assign(hidden, out)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        exe.run(default_startup_program())

        x = numpy.random.random(size=(10, 1)).astype('float32')

        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
        print(outs)
        loss = layers.mean(x=out)
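        # append_backward_ops adds the gradient operators for `loss`; the
        # gradient of a variable v is exposed as a variable named "v@GRAD"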
        append_backward_ops(loss=loss)
        outs = exe.run(
            feed={'X': x},
            fetch_list=[
                default_main_program().block(0).var(data.name + "@GRAD")
            ])[0]
        print(outs)
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        place = self.place()
        program = Program()
        x = layers.data(name='x', shape=[10], main_program=program)
        x.persistable = True
        table = layers.lod_rank_table(x, level=level, main_program=program)
        max_len = layers.max_sequence_len(table, main_program=program)
        max_len.persistable = True
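        # lod_tensor_to_array splits x into a tensor array in rank-table order;
        # array_to_lod_tensor below is the inverse, so `result` should equal x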
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        array.persistable = True

        result = layers.array_to_lod_tensor(array, table, main_program=program)
        result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        var = scope.find_var(array.name)
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
    def test_shrink_rnn_memory(self):
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        table = layers.lod_rank_table(x=x)
        i = layers.zeros(dtype='int64', shape=[1])
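        # shrink_memory keeps, at step i, only the rows of sequences that are
        # still active (the rank table sorts sequences by length, descending)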
        mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem3 = layers.shrink_memory(x=mem2, i=i, table=table)

        cpu = core.CPUPlace()
        tensor = core.LoDTensor()
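        # offset-based LoD: [0, 2, 5, 6] marks three sequences of lengths
        # 2, 3 and 1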
        tensor.set_lod([[0, 2, 5, 6]])
        tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
        tensor.set(tensor_np, cpu)
        exe = Executor(cpu)
        outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
        self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

        mem3_mean = layers.mean(x=mem3)
        append_backward_ops(loss=mem3_mean)
        x_grad = exe.run(
            feed={'x': tensor},
            fetch_list=[main_program.global_block().var('x@GRAD')])[0]
        self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
    def test_ifelse(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(
            input=label, dtype='int64', shape=[1], value=5.0, **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)

        ie = layers.IfElse(cond, **kwargs)

        with ie.true_block():
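            # ie.input(image) yields only the batch rows for which cond holds;
            # the false block below receives the complementary rows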
            true_image = ie.input(image)
            hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        with ie.false_block():
            false_image = ie.input(image)
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        prob = ie()
        loss = layers.cross_entropy(input=prob[0], label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array(map(lambda x: x[0], data)).astype("float32")
                y_data = np.array(map(lambda x: x[1], data)).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(kwargs['main_program'],
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.fail('training loss never dropped below 1.0')
    def main(self,
             tensor,
             mask,
             expect_true,
             expect_false,
             expect_out,
             level=0):
        place = self.place()
        program = Program()
        x = layers.data(name='x', shape=[1], main_program=program)
        x.persistable = True

        y = layers.data(name='y', shape=[1], main_program=program)
        y.persistable = True

        out_true, out_false = layers.split_lod_tensor(input=x,
                                                      mask=y,
                                                      level=level,
                                                      main_program=program)
        out_true.persistable = True
        out_false.persistable = True

        out = layers.merge_lod_tensor(in_true=out_true,
                                      in_false=out_false,
                                      mask=y,
                                      x=x,
                                      level=level,
                                      main_program=program)

        out.persistable = True

        exe = Executor(place)
        scope = core.Scope()
        exe.run(program,
                feed={
                    'x': tensor,
                    'y': mask
                },
                scope=scope,
                return_numpy=False)

        var_true = scope.find_var(out_true.name).get_tensor()

        var_false = scope.find_var(out_false.name).get_tensor()

        var_out = scope.find_var(out.name).get_tensor()

        self.check_tensor_same(var_true, expect_true)
        self.check_tensor_same(var_false, expect_false)
        self.check_tensor_same(var_out, expect_out)
Example #8
    def test_lod_rank_table(self):
        x = data(name='x', shape=[100])
        cpu = core.CPUPlace()
        rank_table = lod_rank_table(x=x, level=1)
        rank_table.persistable = True
        exe = Executor(cpu)
        scope = core.Scope()

        tensor = core.LoDTensor()
        tensor.set(numpy.random.random(size=(17, 100)), cpu)
        tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]])
        exe.run(scope=scope, feed={'x': tensor})
        var = scope.find_var(rank_table.name)
        table = var.get_lod_rank_table()
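        # items() returns (original sequence index, sequence length) pairs,
        # sorted by length in descending order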
        self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        table = layers.lod_rank_table(x, level=0, main_program=program)
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        result = layers.array_to_lod_tensor(array, table, main_program=program)

        mean = layers.mean(x=result, main_program=program)

        append_backward_ops(mean)
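        # mean spreads a gradient of 1/10 over the 10 input elements, so the
        # fetched x@GRAD should sum to roughly 1.0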

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum()
            for item in exe.run(program,
                                feed={'x': tensor},
                                fetch_list=[g_vars],
                                return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        y = layers.data(
            name='y',
            shape=[1],
            dtype='bool',
            main_program=program,
            stop_gradient=False)

        level = 0

        out_true, out_false = layers.split_lod_tensor(
            input=x, mask=y, level=level, main_program=program)
        out = layers.merge_lod_tensor(
            in_true=out_true,
            in_false=out_false,
            mask=y,
            x=x,
            level=level,
            main_program=program)
        mean = layers.mean(x=out, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        mask_np = np.array([0, 1, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)

        mask = core.LoDTensor()
        mask.set(mask_np, place)

        exe = Executor(place)
        scope = core.Scope()

        g_vars = program.global_block().var(x.name + "@GRAD")
        g_out = [
            item.sum()
            for item in map(np.array,
                            exe.run(program,
                                    feed={'x': tensor,
                                          'y': mask},
                                    fetch_list=[g_vars],
                                    scope=scope,
                                    return_numpy=False))
        ]

        g_out_sum = np.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Example #12
 def test_forward(self):
     x_np = np.random.normal(size=(2, 3)).astype("float32")
     self.feed_map = {'X': x_np}
     self.fetch_list = [self.Out]
     exe = Executor(self.place)
     out = exe.run(self.program,
                   feed=self.feed_map,
                   fetch_list=self.fetch_list)
     self.assertTrue(np.allclose(out[0], x_np, rtol=1e-5))
Example #13
 def test_array_length(self):
     tmp = layers.zeros(shape=[10], dtype='int32')
     i = layers.fill_constant(shape=[1], dtype='int64', value=10)
     arr = layers.array_write(tmp, i=i)
     arr_len = layers.array_length(arr)
     cpu = core.CPUPlace()
     exe = Executor(cpu)
     result = exe.run(fetch_list=[arr_len])[0]
     self.assertEqual(11, result[0])
Example #16
    def forward(self):
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.data_field
        }
        exe = Executor(self.place)
        out = exe.run(self.main_program,
                      feed=self.feed_map,
                      fetch_list=[self.output])

        return out[0]
Example #18
    def test_simple_forward(self):
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)

        while_op = layers.While(cond=cond)
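        # each iteration reads one step, accumulates into mem_array, and
        # refreshes cond in place via less_than(..., cond=cond)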
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(x=sum_result)

        append_backward_ops(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

        for i in xrange(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
Example #19
    def test_backward(self):
        self.feed_map = {
            name: np.random.normal(size=(2, 3)).astype("float32")
            for name in self.input_names
        }
        self.fetch_list = [self.output_vars['X@GRAD']]

        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(
            np.allclose(out[0], self.feed_map['Out@GRAD'], rtol=1e-5))
def main():
    rnn_out = encoder_decoder()
    label = layers.data(name="target_language_next_word",
                        shape=[1],
                        dtype='int64',
                        lod_level=1)
    cost = layers.cross_entropy(input=rnn_out, label=label)
    avg_cost = fluid.layers.mean(x=cost)

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    train_data = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.wmt14.train(dict_size), buf_size=1000),
                              batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(2):
        for data in train_data():
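            # to_lodtensor (a helper defined elsewhere in this script) packs a
            # minibatch of variable-length sequences into a single LoDTensor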
            word_data = to_lodtensor(map(lambda x: x[0], data), place)
            trg_word = to_lodtensor(map(lambda x: x[1], data), place)
            trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
            outs = exe.run(framework.default_main_program(),
                           feed={
                               'src_word_id': word_data,
                               'target_language_word': trg_word,
                               'target_language_next_word': trg_word_next
                           },
                           fetch_list=[avg_cost])
            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            if batch_id > 3:
                exit(0)
            batch_id += 1
    def test_backward(self):
        self.feed_map = {
            name: np.random.normal(size=(2, 3)).astype("float32")
            for name in ['X', 'Out']
        }
        self.fetch_list = [self.output_vars['X@GRAD']]

        exe = Executor(self.place)
        out = exe.run(self.program,
                      feed=self.feed_map,
                      fetch_list=self.fetch_list)
        self.assertTrue(
            np.allclose(
                out[0], np.zeros(shape=(2, 3)).astype("float32"), rtol=1e-5))
Example #24
def paddle_random_normal(shape, loc=.0, scale=1., seed=1, dtype="float32"):
    program = framework.Program()
    block = program.global_block()
    w = block.create_var(
        dtype="float32",
        shape=shape,
        lod_level=0,
        name="param",
        initializer=initializer.NormalInitializer(
            loc=loc, scale=scale, seed=seed))
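    # running the program executes the initializer op attached to `w`, so the
    # fetch returns freshly drawn normal samples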
    place = core.CPUPlace()
    exe = Executor(place)
    out = exe.run(program, fetch_list=[w])
    return np.array(out[0])
Example #26
    def backward(self):
        self.feed_map = {
            x: create_tensor(getattr(self.py_rnn, x), self.place)
            for x in self.data_field
        }
        fetch_list = [
            self.main_program.global_block().var(x + "@GRAD")
            for x in self.data_field
        ]

        exe = Executor(self.place)
        return exe.run(self.main_program,
                       feed=self.feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)
 def test_mul(self):
     a = data(name='a', shape=[784], dtype='float32')
     b = data(
         name='b',
         shape=[784, 100],
         dtype='float32',
         append_batch_size=False)
     out = mul(x=a, y=b)
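     # 'a' is fed with batch size 100: (100, 784) x (784, 100) -> (100, 100)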
     place = core.CPUPlace()
     a_np = numpy.random.random((100, 784)).astype('float32')
     b_np = numpy.random.random((784, 100)).astype('float32')
     exe = Executor(place)
     outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out])
     out = outs[0]
     self.assertEqual((100, 100), out.shape)
     self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
Example #29
 def test_param(self):
     shape = [784, 100]
     val = 1.0625
     b = main_program.global_block()
     param = b.create_parameter(name='fc.w',
                                shape=shape,
                                dtype='float32',
                                initializer=ConstantInitializer(val))
     self.assertIsNotNone(param)
     self.assertEqual('fc.w', param.name)
     self.assertEqual((784, 100), param.shape)
     self.assertEqual(core.DataType.FP32, param.dtype)
     self.assertEqual(0, param.block.idx)
     exe = Executor(core.CPUPlace())
     p = exe.run(main_program, fetch_list=[param])[0]
     self.assertTrue(np.allclose(p, np.ones(shape) * val))
     p = io.get_parameter_value_by_name('fc.w', exe, main_program)
     self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
def main():
    cost = model()
    sgd_optimizer = SGDOptimizer(learning_rate=0.2)
    opts = sgd_optimizer.minimize(cost)

    if USE_GPU:
        place = core.GPUPlace(0)
    else:
        place = core.CPUPlace()

    exe = Executor(place)
    exe.run(framework.default_startup_program())

    train_reader = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.movielens.train(), buf_size=8192),
                                batch_size=BATCH_SIZE)

    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
        feed_tensors = {}
        for (key, idx) in feeding.iteritems():
            tensor = core.LoDTensor()
            if key != "category_id" and key != "movie_title":
                if key == "score":
                    numpy_data = np.array(map(lambda x: x[idx],
                                              data)).astype("float32")
                else:
                    numpy_data = np.array(map(lambda x: x[idx],
                                              data)).astype("int64")
            else:
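                # variable-length fields (category_id, movie_title): build an
                # offset LoD from per-sample lengths before concatenating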
                numpy_data = map(lambda x: np.array(x[idx]).astype("int64"),
                                 data)
                lod_info = [len(item) for item in numpy_data]
                offset = 0
                lod = [offset]
                for item in lod_info:
                    offset += item
                    lod.append(offset)
                numpy_data = np.concatenate(numpy_data, axis=0)
                tensor.set_lod([lod])

            numpy_data = numpy_data.reshape([numpy_data.shape[0], 1])
            tensor.set(numpy_data, place)
            feed_tensors[key] = tensor
        return feed_tensors

    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for data in train_reader():
            outs = exe.run(framework.default_main_program(),
                           feed=func_feed(feeding, data),
                           fetch_list=[cost])
            out = np.array(outs[0])
            if out[0] < 6.0:
                # if avg cost less than 6.0, we think our code is good.
                exit(0)
Example #32
    def test_raw_api(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(input=label,
                                                     dtype='int64',
                                                     shape=[1],
                                                     value=5.0,
                                                     **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)
        true_image, false_image = layers.split_lod_tensor(input=image,
                                                          mask=cond,
                                                          **kwargs)

        true_out = layers.create_tensor(dtype='float32', **kwargs)
        true_cond = layers.ConditionalBlock([true_image], **kwargs)

        with true_cond.block():
            hidden = layers.fc(input=true_image,
                               size=100,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=true_out, **kwargs)

        false_out = layers.create_tensor(dtype='float32', **kwargs)
        false_cond = layers.ConditionalBlock([false_image], **kwargs)

        with false_cond.block():
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=false_out, **kwargs)

        prob = layers.merge_lod_tensor(in_true=true_out,
                                       in_false=false_out,
                                       mask=cond,
                                       x=image,
                                       **kwargs)
        loss = layers.cross_entropy(input=prob, label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])

        train_reader = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                    batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array(map(lambda x: x[0], data)).astype("float32")
                y_data = np.array(map(lambda x: x[1], data)).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(kwargs['main_program'],
                               feed={
                                   'x': x_data,
                                   'y': y_data
                               },
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.fail('training loss never dropped below 1.0')
Example #33
def main():
    cost, u, m = model()
    optimizer = SGD(learning_rate=0.2)
    # optimizer = Adam(learning_rate=1e-4)
    opts = optimizer.minimize(cost)

    if USE_GPU:
        place = core.GPUPlace(0)
    else:
        place = core.CPUPlace()

    exe = Executor(place)
    exe.run(framework.default_startup_program())
    # print framework.default_main_program().block(0).vars.keys()
    print(framework.default_main_program().block(0).vars.get(
        "embedding_0.tmp_0"))

    train_reader = paddle.batch(paddle.dataset.movielens.train(),
                                batch_size=BATCH_SIZE)

    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        # 'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
        feed_tensors = {}
        for (key, idx) in feeding.iteritems():
            tensor = core.LoDTensor()
            if key != "category_id" and key != "movie_title":
                if key == "score":
                    numpy_data = np.array(map(lambda x: x[idx],
                                              data)).astype("float32")
                else:
                    numpy_data = np.array(map(lambda x: x[idx],
                                              data)).astype("int64")
            else:
                numpy_data = map(lambda x: np.array(x[idx]).astype("int64"),
                                 data)
                lod_info = [len(item) for item in numpy_data]
                offset = 0
                lod = [offset]
                for item in lod_info:
                    offset += item
                    lod.append(offset)
                numpy_data = np.concatenate(numpy_data, axis=0)
                tensor.set_lod([lod])

            numpy_data = numpy_data.reshape([numpy_data.shape[0], 1])
            tensor.set(numpy_data, place)
            feed_tensors[key] = tensor
        return feed_tensors

    PASS_NUM = 5
    for pass_id in range(PASS_NUM):
        batch_id = 0
        ts = time.time()
        for data in train_reader():
            outs = exe.run(framework.default_main_program(),
                           feed=func_feed(feeding, data),
                           fetch_list=[cost, u, m])
            out = np.array(outs[0])
            if batch_id % 100 == 0:
                print("pass %d, batch %d, cost: %f" %
                      (pass_id, batch_id, out[0]))
                print(outs[1])
                print(outs[2])
            batch_id += 1
            # if out[0] < 6.0:
            #     # if avg cost less than 6.0, we think our code is good.
            #     exit(0)
        print("pass %d, cost: %f, time: %f" %
              (pass_id, out[0], time.time() - ts))
Example #35
# (the beginning of this snippet is truncated in the source; the elided
# lines define `predict` and `label`, ending with an initializer seeded
# by SEED)

cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
optimizer = AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
opts = optimizer.minimize(avg_cost)

accuracy = evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(paddle.dataset.mnist.train(),
                            batch_size=BATCH_SIZE)

place = core.CPUPlace()
exe = Executor(place)

exe.run(framework.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    pass_start = time.clock()
    for batch_id, data in enumerate(train_reader()):
        img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
                                data)).astype(DTYPE)
        y_data = np.array(map(lambda x: x[1], data)).astype("int64")
        y_data = y_data.reshape([len(y_data), 1])

        tensor_img = core.LoDTensor()
        tensor_y = core.LoDTensor()
        tensor_img.set(img_data, place)
        tensor_y.set(y_data, place)
Example #37
    def check_output_with_place(self, place, atol):
        op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)

        program = Program()
        block = program.global_block()

        inputs = append_input_output(block, op_proto, self.inputs, True)
        outputs = append_input_output(block, op_proto, self.outputs, False)
        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=self.attrs if hasattr(self, "attrs") else dict())
        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        fetch_list = []
        for var_name, var in outputs.iteritems():
            if var_name in self.outputs:
                if isinstance(var, list):
                    for v in var:
                        fetch_list.append(v)
                else:
                    fetch_list.append(var)

        feed_map = self.feed_var(inputs, place)

        exe = Executor(place)
        outs = exe.run(program,
                       feed=feed_map,
                       fetch_list=fetch_list,
                       return_numpy=False)

        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            def find_actual(target_name, fetch_list):
                found = [
                    i for i, var in enumerate(fetch_list)
                    if var.name == target_name
                ]
                self.assertTrue(
                    len(found) == 1, "Found {} {}".format(
                        len(found), target_name))
                return found[0]

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for sub_out_name, expect in sub_out:
                    idx = find_actual(sub_out_name, fetch_list)
                    actual = outs[idx]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.lod(), expect[1], "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                idx = find_actual(out_name, fetch_list)
                actual = outs[idx]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))
Example #39
    def _get_gradient(self, input_to_check, place, output_names, no_grad_set):
        prog = Program()
        block = prog.global_block()
        inputs_with_np = {
            key: value
            for (key, value) in OpTest._create_var_descs_(
                block, getattr(self, 'inputs', {}))
        }
        outputs_with_np = {
            key: val
            for (key, val) in OpTest._create_var_descs_(
                block, getattr(self, 'outputs', {}))
        }
        inputs = {
            k: [item[0] for item in inputs_with_np[k]]
            for k in inputs_with_np
        }
        outputs = {
            k: [item[0] for item in outputs_with_np[k]]
            for k in outputs_with_np
        }

        op = block.append_op(
            type=self.op_type,
            inputs=inputs,
            outputs=outputs,
            attrs=getattr(self, 'attrs', {}))

        # infer variable type and infer shape in compile-time
        op.desc.infer_var_type(block.desc)
        op.desc.infer_shape(block.desc)

        mean_inputs = map(block.var, output_names)
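        # build one scalar loss from the checked outputs: mean each output,
        # sum the means, then scale by 1/n, so backward starts from a scalar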

        if len(mean_inputs) == 1:
            loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
            op = block.append_op(
                inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
            op.desc.infer_var_type(block.desc)
            op.desc.infer_shape(block.desc)
        else:
            avg_sum = []
            for cur_loss in mean_inputs:
                cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
                op = block.append_op(
                    inputs={"X": [cur_loss]},
                    outputs={"Out": [cur_avg_loss]},
                    type="mean")
                op.desc.infer_var_type(block.desc)
                op.desc.infer_shape(block.desc)
                avg_sum.append(cur_avg_loss)

            loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
            op_sum = block.append_op(
                inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
            op_sum.desc.infer_var_type(block.desc)
            op_sum.desc.infer_shape(block.desc)

            loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
            op_loss = block.append_op(
                inputs={"X": loss_sum},
                outputs={"Out": loss},
                type='scale',
                attrs={'scale': 1.0 / float(len(avg_sum))})
            op_loss.desc.infer_var_type(block.desc)
            op_loss.desc.infer_shape(block.desc)

        param_grad_list = append_backward_ops(
            loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set)

        feed_dict = {
            item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place)
            for p_name in inputs_with_np for item in inputs_with_np[p_name]
        }

        fetch_list = [g for p, g in param_grad_list]
        executor = Executor(place)
        return map(
            np.array,
            executor.run(prog, feed_dict, fetch_list, return_numpy=False))
    def test_read_write(self):
        x = [
            layers.data(name='x0', shape=[100]),
            layers.data(name='x1', shape=[100]),
            layers.data(name='x2', shape=[100])
        ]

        for each_x in x:
            each_x.stop_gradient = False

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        arr = layers.array_write(x=x[0], i=i)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[1], i=i, array=arr)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[2], i=i, array=arr)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        a0 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a1 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a2 = layers.array_read(array=arr, i=i)

        mean_a0 = layers.mean(x=a0)
        mean_a1 = layers.mean(x=a1)
        mean_a2 = layers.mean(x=a2)

        a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])

        mean_x0 = layers.mean(x=x[0])
        mean_x1 = layers.mean(x=x[1])
        mean_x2 = layers.mean(x=x[2])

        x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])

        scope = core.Scope()
        cpu = core.CPUPlace()

        exe = Executor(cpu)

        tensor = numpy.random.random(size=(100, 100)).astype('float32')

        outs = exe.run(feed={
            'x0': tensor,
            'x1': tensor,
            'x2': tensor
        },
                       fetch_list=[a_sum, x_sum],
                       scope=scope)
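        # the array write/read round-trips the inputs, so the sum of the
        # read-back means must equal the sum over the original tensors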
        self.assertEqual(outs[0], outs[1])

        total_sum = layers.sums(input=[a_sum, x_sum])
        total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0)

        append_backward_ops(total_sum_scaled)

        g_vars = map(default_main_program().global_block().var,
                     [each_x.name + "@GRAD" for each_x in x])
        g_out = [
            item.sum() for item in exe.run(feed={
                'x0': tensor,
                'x1': tensor,
                'x2': tensor
            },
                                           fetch_list=g_vars)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # since our final gradient is 1 and the neural network are all linear
        # with mean_op.
        # the input gradient should also be 1
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)