Example #1
    def setUp(self):
        self.main_program = Program()
        switch_main_program(self.main_program)
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        # A lod_level=1 input drives the rank table that decides how many
        # rows of memory survive at each step.
        rank_table_tensor = layers.data('rank_table_tensor',
                                        shape=[1],
                                        dtype='float32',
                                        lod_level=1)
        table = lod_rank_table(x=rank_table_tensor)
        i = layers.zeros(dtype='int64', shape=[1])
        # Shrink the memory three times, once per step index.
        self.mem1 = shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem2 = shrink_memory(x=self.mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        self.mem3 = shrink_memory(x=self.mem2, i=i, table=table)
        mem3_mean = layers.mean(self.mem3)
        # Append the backward pass so x@GRAD is created in the program.
        append_backward(loss=mem3_mean)
        self.x_grad = self.main_program.global_block().var('x@GRAD')
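A minimal sketch of how the program built in this setUp could be exercised; it assumes fluid and numpy are imported, that fluid.create_lod_tensor is available, and the method name test_grad and the fed values are illustrative rather than taken from this page:

    def test_grad(self):
        # Illustrative sketch, not the original test body: run the program
        # with a small batch and fetch the gradient of x.
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        x_np = numpy.random.random(size=(3, 100)).astype('float32')
        # Three sequences of lengths 1, 2 and 3 build the rank table, so the
        # memory shrinks from 3 rows to 2 and then to 1 across the steps.
        rank_np = numpy.random.random(size=(6, 1)).astype('float32')
        rank_tensor = fluid.create_lod_tensor(rank_np, [[1, 2, 3]], place)
        outs = exe.run(self.main_program,
                       feed={'x': x_np, 'rank_table_tensor': rank_tensor},
                       fetch_list=[self.mem1, self.mem2, self.mem3,
                                   self.x_grad])
        self.assertEqual((3, 100), outs[-1].shape)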
Example #2
    def test_plain_while_op(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[len(self.word_dict), 32],
                                              dtype='float32')

            label = fluid.layers.data(name='label', shape=[1], dtype='float32')

            # Build a rank table from the embedded sentences; it is later
            # used to split the LoD tensor into one entry per time step.
            rank_table = lod_rank_table(x=sent_emb)

            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

            seq_len = max_sequence_len(rank_table=rank_table)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False

            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

                # Drop the rows of sequences that have already finished so
                # the recurrent memory matches the inputs still active at
                # step i.
                mem = shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt],
                                         size=100,
                                         act='tanh')

                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

            # Pack the per-step outputs back into a single LoD tensor.
            all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
            last = fluid.layers.sequence_last_step(input=all_timesteps)
            logits = fluid.layers.fc(input=last, size=1, act=None)
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)
        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu)

        data = next(self.train_data())
        val = exe.run(main_program, feed=feeder.feed(data),
                      fetch_list=[loss])[0]
        self.assertEqual((1, ), val.shape)
        print(val)
        self.assertFalse(numpy.isnan(val))
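The self.train_data generator used above is not shown on this page. The following is a hypothetical stand-in, not the original data pipeline, meant only to illustrate the batch layout fluid.DataFeeder expects for the lod_level=1 'word' field and the float32 label:

    def train_data(self):
        # Hypothetical stand-in for the generator used above.  Each sample
        # pairs a variable-length list of int64 word ids (the lod_level=1
        # 'word' field) with a single float label, matching the feed_list
        # built in the test.
        while True:
            yield [([0, 1, 2, 3], [0.0]),
                   ([4, 5], [1.0])]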
Example #3
    def test_plain_while_op(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[self.word_dict_len, 32],
                                              dtype='float32')

            rank_table = lod_rank_table(x=sent_emb)
            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

            seq_len = max_sequence_len(rank_table=rank_table)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False
            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

                # Shrink the memory to the sequences still active at step i.
                mem = shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt],
                                         size=100,
                                         act='tanh')

                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

            result_all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
            last = fluid.layers.sequence_last_step(input=result_all_timesteps)

            logits = fluid.layers.fc(input=last, size=1, act=None)
            label = fluid.layers.data(name='label', shape=[1], dtype='float32')
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)

        # Check the lod_level set at compile time.
        self.assertEqual(sent_emb.lod_level, result_all_timesteps.lod_level)

        self._train(main_program=main_program,
                    startup_program=startup_program,
                    feed_list=[sentence, label],
                    fetch_list=[sent_emb, result_all_timesteps, loss],
                    is_nested=False,
                    max_iters=1)
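The self._train helper called at the end is likewise not shown on this page. A hypothetical reconstruction of what such a helper might do, assuming fluid and numpy are imported and that self.train_data yields mini-batches like the stand-in sketched after Example #2:

    def _train(self, main_program, startup_program, feed_list, fetch_list,
               is_nested, max_iters):
        # Hypothetical reconstruction, not the original helper: run the
        # startup program once, then feed a few mini-batches through the
        # main program and sanity-check the fetched loss for NaNs.
        # is_nested is accepted for signature compatibility but unused here.
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
        for _ in range(max_iters):
            data = next(self.train_data())
            outs = exe.run(main_program,
                           feed=feeder.feed(data),
                           fetch_list=fetch_list,
                           return_numpy=False)
            loss_val = numpy.array(outs[-1])
            self.assertFalse(numpy.isnan(loss_val).any())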
Example #4
def test_table_type():
    # Exercises type checking of the table argument, which is expected to
    # be the Variable produced by lod_rank_table.
    out = shrink_memory(x=x, i=i, table=rank_table)
Example #5
def test_i_type():
    # i is passed as a plain Python int instead of a Variable, which
    # should trip the operator's type check.
    out = shrink_memory(x=x, i=0, table=rank_table_tensor)
Example #6
def test_x_type():
    # x is passed as a plain Python int instead of a Variable, which
    # should trip the operator's type check.
    out = shrink_memory(x=1, i=i, table=rank_table_tensor)
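Examples #4 to #6 are fragments of nested helper functions; the enclosing test that defines x, i and the rank-table inputs and asserts that each call fails is not shown. A hedged sketch of that wrapping pattern, assuming fluid is imported, that shrink_memory and lod_rank_table are in scope, and that shrink_memory type-checks its arguments; the method name and the rebuilt inputs are illustrative, not taken from this page:

    def test_shrink_memory_errors(self):
        # Illustrative wrapper only: rebuild plausible inputs for the
        # fragments above, then assert that a mistyped call raises
        # TypeError, shown here for the i argument.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.layers.data(name='x', shape=[100], dtype='float32')
            i = fluid.layers.zeros(dtype='int64', shape=[1])
            rank_table_tensor = fluid.layers.data(name='rank_table_tensor',
                                                  shape=[1],
                                                  dtype='float32',
                                                  lod_level=1)
            rank_table = lod_rank_table(x=rank_table_tensor)

            def test_i_type():
                # i must be a Variable; a plain Python int is rejected.
                out = shrink_memory(x=x, i=0, table=rank_table)

            self.assertRaises(TypeError, test_i_type)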