def test_errors(self):
        """array_to_lod_tensor must reject non-Variable x/table args with TypeError."""
        with program_guard(Program(), Program()):
            x = numpy.random.random((10)).astype("float32")
            x2 = layers.data(name='x', shape=[10])
            table = lod_rank_table(x2, level=0)
            array = lod_tensor_to_array(x2, table)

            # x must be a Variable (LoDTensorArray), not a raw numpy array.
            self.assertRaises(
                TypeError, lambda: array_to_lod_tensor(x=x, table=table))

            table2 = numpy.random.random((2)).astype("int64")

            # table must be a Variable as well.
            self.assertRaises(
                TypeError, lambda: array_to_lod_tensor(x=array, table=table2))

            # Wrapping the bad numpy array in a list does not make it valid.
            self.assertRaises(
                TypeError, lambda: array_to_lod_tensor(x=[x], table=table))

            # Same for a list-wrapped table argument.
            self.assertRaises(
                TypeError, lambda: array_to_lod_tensor(x=x2, table=[table2]))

            # Proper Variable inputs build without raising.
            array = array_to_lod_tensor(x2, table)
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        """Round-trip *tensor* through lod_tensor_to_array / array_to_lod_tensor.

        Builds the graph, feeds *tensor* as 'x', then checks: the intermediate
        LoDTensorArray against expect_array/expect_lod (when both are given),
        the reconstructed tensor against the original input, and the computed
        max_sequence_len against expect_max_len.
        """
        place = self.place()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[10])
            # persistable so these variables survive in the scope after run()
            x.persistable = True
            table = lod_rank_table(x, level=level)
            max_len = max_sequence_len(table)
            max_len.persistable = True
            array = lod_tensor_to_array(x, table)
            array.persistable = True

            result = array_to_lod_tensor(array, table)
            result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        var = scope.find_var(array.name)
        # rebind `array` from the graph Variable to the runtime LoDTensorArray
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        # the array -> tensor round trip must reproduce the original input
        self.check_tensor_same(
            scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
    def test_grad(self):
        """Check the gradient of mean() over a tensor->array->tensor round trip.

        The round trip only regroups elements, so the gradient of mean(result)
        w.r.t. x, summed over all 10 elements, should be approximately 1.0.
        """
        place = core.CPUPlace()
        program = Program()

        with program_guard(program):
            x = layers.data(name='x',
                            shape=[1],
                            dtype='float32',
                            stop_gradient=False)
            table = lod_rank_table(x, level=0)
            array = lod_tensor_to_array(x, table)
            result = array_to_lod_tensor(array, table)

            mean = layers.mean(result)

            append_backward(mean)

        # 10 rows grouped into 3 sequences of lengths 3, 6 and 1
        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_recursive_sequence_lengths([[3, 6, 1]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum() for item in exe.run(program,
                                                        feed={'x': tensor},
                                                        fetch_list=[g_vars],
                                                        return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # sum of d(mean)/dx over all elements is 1.0 (within tolerance)
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
예제 #4
0
    def test_errors(self):
        """lod_rank_table must reject raw numpy inputs with TypeError."""
        with program_guard(Program(), Program()):
            x = numpy.random.random((2, 4)).astype("float32")

            # a bare numpy array is not a Variable
            self.assertRaises(
                TypeError, lambda: lod_rank_table(x=x, level=1))

            # neither is a list containing one
            self.assertRaises(
                TypeError, lambda: lod_rank_table(x=[x], level=1))

            # a proper data Variable is accepted, bare or list-wrapped
            x = data(name='x', shape=[10], dtype='float32', lod_level=1)
            out = lod_rank_table(x=x, level=0)
            out = lod_rank_table(x=[x], level=0)
예제 #5
0
 def set_program(cls):
     """Build a program that reorders `dat` by the rank table of `rank_dat`.

     Adds a reduce_sum loss plus its backward pass, and stores the reordered
     output and the input's gradient name in cls.fetch_list for the tests.
     NOTE(review): takes `cls` — presumably a classmethod; assumes
     cls.data_desc is [(name, shape), (name, shape)] — confirm against the
     enclosing class (not visible in this chunk).
     """
     dat = fluid.layers.data(name=cls.data_desc[0][0],
                             shape=cls.data_desc[0][1])
     dat.stop_gradient = False
     rank_dat = fluid.layers.data(name=cls.data_desc[1][0],
                                  shape=cls.data_desc[1][1])
     table = lod_rank_table(rank_dat)
     new_dat = fluid.layers.reorder_lod_tensor_by_rank(x=dat,
                                                       rank_table=table)
     loss = fluid.layers.reduce_sum(new_dat)
     fluid.backward.append_backward(loss=loss)
     cls.fetch_list = [new_dat, cls.data_desc[0][0] + '@GRAD']
예제 #6
0
 def sequence_slice(x, index):
     """Read one timestep of *x* back out as a LoDTensor.

     Converts *x* to a LoDTensorArray via its level-0 rank table, reads the
     entry at position 1, and restores it to LoDTensor form with the same
     rank table.

     NOTE(review): the `index` parameter is currently ignored — the read
     position is hard-coded to 1. Confirm whether `value=index` was intended
     before relying on this for arbitrary slices.
     """
     from paddle.fluid.layers.control_flow import lod_rank_table
     from paddle.fluid.layers.control_flow import lod_tensor_to_array
     from paddle.fluid.layers.control_flow import array_read
     from paddle.fluid.layers.control_flow import array_to_lod_tensor
     idx = layers.fill_constant(shape=[1], value=1, dtype='int32')
     # the read index is a constant, not something to differentiate through
     idx.stop_gradient = True
     table = lod_rank_table(x, level=0)
     table.stop_gradient = True
     array = lod_tensor_to_array(x, table)
     slice_array = array_read(array=array, i=idx)
     return array_to_lod_tensor(slice_array, table)
예제 #7
0
    def test_lod_rank_table(self):
        """Build a rank table at lod level 1 and verify its (index, length) items."""
        x = data(name='x', shape=[100])
        place = core.CPUPlace()
        rank_table = lod_rank_table(x=x, level=1)
        rank_table.persistable = True
        exe = Executor(place)
        scope = core.Scope()

        # level-1 sequence lengths are [5, 1, 1]
        tensor = core.LoDTensor()
        tensor.set(numpy.random.random(size=(17, 100)), place)
        tensor.set_recursive_sequence_lengths([[1, 2], [5, 1, 1],
                                               [3, 1, 5, 1, 3, 3, 1]])
        exe.run(scope=scope, feed={'x': tensor})

        # items are (original_index, length), sorted by descending length
        table = scope.find_var(rank_table.name).get_lod_rank_table()
        self.assertEqual([(0, 5), (1, 1), (2, 1)], list(table.items()))
예제 #8
0
 def setUp(self):
     """Chain three shrink_memory steps and attach a backward pass.

     Each step shrinks the previous memory to the batch size implied by the
     rank table at step index i (i incremented between steps); the mean of
     the final memory serves as the loss, and x's gradient var is stored.
     """
     self.main_program = Program()
     switch_main_program(self.main_program)
     x = layers.data('x', shape=[100], dtype='float32')
     x.stop_gradient = False
     # the rank table comes from a separate lod-level-1 input
     rank_table_tensor = layers.data('rank_table_tensor',
                                     shape=[1],
                                     dtype='float32',
                                     lod_level=1)
     table = lod_rank_table(x=rank_table_tensor)
     i = layers.zeros(dtype='int64', shape=[1])
     self.mem1 = shrink_memory(x=x, i=i, table=table)
     i = layers.increment(x=i)
     i.stop_gradient = True
     self.mem2 = shrink_memory(x=self.mem1, i=i, table=table)
     i = layers.increment(x=i)
     i.stop_gradient = True
     self.mem3 = shrink_memory(x=self.mem2, i=i, table=table)
     mem3_mean = layers.mean(self.mem3)
     append_backward(loss=mem3_mean)
     self.x_grad = self.main_program.global_block().var('x@GRAD')
예제 #9
0
    def test_plain_while_op(self):
        """Train one step of a hand-built While-op RNN over word sequences.

        Builds an RNN manually: embeds the words, sorts sequences via a rank
        table into a LoDTensorArray, iterates timesteps with a While op
        (shrinking the memory to the live batch each step), converts the
        collected outputs back to a LoDTensor, and runs one SGD step on a
        sigmoid cross-entropy loss. Asserts the loss is a scalar and not NaN.
        """
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[len(self.word_dict), 32],
                                              dtype='float32')

            label = fluid.layers.data(name='label', shape=[1], dtype='float32')

            # sort sequences by length so timestep batches are contiguous
            rank_table = lod_rank_table(x=sent_emb)

            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

            seq_len = max_sequence_len(rank_table=rank_table)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            # zero initial memory shaped like the first timestep's batch
            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False

            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

                # shrink memory to the number of sequences still active at step i
                mem = shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt],
                                         size=100,
                                         act='tanh')

                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                # update the loop condition in place for the While op
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

            # stitch per-timestep outputs back into one LoDTensor
            all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
            last = fluid.layers.sequence_last_step(input=all_timesteps)
            logits = fluid.layers.fc(input=last, size=1, act=None)
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)
        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu)

        data = next(self.train_data())
        val = exe.run(main_program, feed=feeder.feed(data),
                      fetch_list=[loss])[0]
        self.assertEqual((1, ), val.shape)
        print(val)
        self.assertFalse(numpy.isnan(val))
예제 #10
0
    def test_plain_while_op(self):
        """Build and train a hand-rolled While-op RNN; check lod_level propagation.

        Same manual RNN construction as the classic While-op test (rank table,
        LoDTensorArray, shrink_memory per timestep), but additionally asserts
        at compile time that the reconstructed output's lod_level matches the
        embedded input's, then delegates training to self._train.
        """
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[self.word_dict_len, 32],
                                              dtype='float32')

            # sort sequences by length so timestep batches are contiguous
            rank_table = lod_rank_table(x=sent_emb)
            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

            seq_len = max_sequence_len(rank_table=rank_table)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            # zero initial memory shaped like the first timestep's batch
            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False
            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

                # shrink memory to the number of sequences still active at step i
                mem = shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt],
                                         size=100,
                                         act='tanh')

                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                # update the loop condition in place for the While op
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

            # stitch per-timestep outputs back into one LoDTensor
            result_all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
            last = fluid.layers.sequence_last_step(input=result_all_timesteps)

            logits = fluid.layers.fc(input=last, size=1, act=None)
            label = fluid.layers.data(name='label', shape=[1], dtype='float32')
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)

        # Check for lod_level set in compile-time.
        self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level)

        self._train(main_program=main_program,
                    startup_program=startup_program,
                    feed_list=[sentence, label],
                    fetch_list=[sent_emb, result_all_timesteps, loss],
                    is_nested=False,
                    max_iters=1)
예제 #11
0
 def test_list_Variable():
     # Expect lod_rank_table to raise TypeError for a list-wrapped numpy input.
     # NOTE(review): `x` is captured from an enclosing scope not visible here.
     rank_table = lod_rank_table(x=[x], level=1)
예제 #12
0
 def test_Variable():
     # Expect lod_rank_table to raise TypeError for a raw numpy input.
     # NOTE(review): `x` is captured from an enclosing scope not visible here.
     rank_table = lod_rank_table(x=x, level=1)