def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
    place = self.place()
    program = Program()
    with program_guard(program):
        x = layers.data(name='x', shape=[10])
        x.persistable = True
        table = layers.lod_rank_table(x, level=level)
        max_len = layers.max_sequence_len(table)
        max_len.persistable = True
        array = layers.lod_tensor_to_array(x, table)
        array.persistable = True
        result = layers.array_to_lod_tensor(array, table)
        result.persistable = True
    exe = Executor(place)
    scope = core.Scope()
    exe.run(program, feed={'x': tensor}, scope=scope)

    # Check the per-timestep array produced by lod_tensor_to_array.
    var = scope.find_var(array.name)
    array = var.get_lod_tensor_array()
    if expect_array is not None and expect_lod is not None:
        self.check_array_same(array, expect_array, expect_lod)
    # The round trip through array_to_lod_tensor must reproduce the input.
    self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)
    self.assertEqual(
        numpy.array(scope.find_var(max_len.name).get_tensor())[0],
        expect_max_len)
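# A small, self-contained sketch (plain Python, not Paddle APIs; the sequence
# lengths [3, 6, 1] are a made-up example) of the quantities main() checks:
# the rank table sorts sequences by length in descending order, and
# max_sequence_len reports the largest of those lengths, which is also how
# many timestep entries lod_tensor_to_array produces.
_lengths = [3, 6, 1]
_items = sorted(enumerate(_lengths), key=lambda it: it[1], reverse=True)
assert _items == [(1, 6), (0, 3), (2, 1)]  # (original index, length)
assert max(_lengths) == 6                  # expected max_sequence_len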
def test_grad(self):
    place = core.CPUPlace()
    program = Program()
    with program_guard(program):
        x = layers.data(
            name='x', shape=[1], dtype='float32', stop_gradient=False)
        table = layers.lod_rank_table(x, level=0)
        array = layers.lod_tensor_to_array(x, table)
        result = layers.array_to_lod_tensor(array, table)
        mean = layers.mean(result)
        append_backward(mean)

    # Three sequences of lengths 3, 6 and 1 over the 10 input rows.
    tensor = core.LoDTensor()
    tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
    tensor.set_recursive_sequence_lengths([[3, 6, 1]])

    g_vars = program.global_block().var(x.name + "@GRAD")

    exe = Executor(place)
    g_out = [
        numpy.array(item).sum()
        for item in exe.run(program,
                            feed={'x': tensor},
                            fetch_list=[g_vars],
                            return_numpy=False)
    ]
    g_out_sum = numpy.array(g_out).sum()

    self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
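# A hand check (plain numpy, mirroring the data the test feeds) of the
# asserted gradient: mean() averages the 10 input elements, so
# d(mean)/d(x_i) = 1/10 for every element and the summed x@GRAD is
# 10 * (1/10) = 1.0, which is what assertAlmostEqual verifies.
import numpy
_x = numpy.arange(10).reshape(10, 1).astype('float32')
_g = numpy.full_like(_x, 1.0 / _x.size)  # analytic gradient of mean w.r.t. x
assert abs(_g.sum() - 1.0) < 1e-6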
def test_lod_rank_table(self):
    x = data(name='x', shape=[100])
    cpu = core.CPUPlace()
    rank_table = lod_rank_table(x=x, level=1)
    rank_table.persistable = True
    exe = Executor(cpu)
    scope = core.Scope()

    tensor = core.LoDTensor()
    tensor.set(numpy.random.random(size=(17, 100)), cpu)
    tensor.set_lod([[0, 1, 3], [0, 5, 6, 7],
                    [0, 3, 4, 9, 10, 13, 16, 17]])
    exe.run(scope=scope, feed={'x': tensor})

    var = scope.find_var(rank_table.name)
    table = var.get_lod_rank_table()
    # Items are (original sequence index, length), sorted by length.
    self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
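# A minimal sketch (plain Python, not Paddle APIs) of how the expected items
# follow from the test's LoD: at level=1 the offset vector [0, 5, 6, 7]
# describes three sequences of lengths 5, 1 and 1, and the rank table pairs
# each original index with its length, sorted by length in descending order
# (ties keep their original order).
_offsets = [0, 5, 6, 7]
_seq_lengths = [b - a for a, b in zip(_offsets, _offsets[1:])]
_table = sorted(enumerate(_seq_lengths), key=lambda it: it[1], reverse=True)
assert _table == [(0, 5), (1, 1), (2, 1)]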
def setUp(self):
    self.main_program = Program()
    switch_main_program(self.main_program)
    x = layers.data('x', shape=[100], dtype='float32')
    x.stop_gradient = False
    rank_table_tensor = layers.data(
        'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
    table = layers.lod_rank_table(x=rank_table_tensor)
    # Shrink the memory tensor three times, once per RNN step.
    i = layers.zeros(dtype='int64', shape=[1])
    self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
    i = layers.increment(x=i)
    i.stop_gradient = True
    self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
    i = layers.increment(x=i)
    i.stop_gradient = True
    self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
    mem3_mean = layers.mean(self.mem3)
    append_backward(loss=mem3_mean)
    self.x_grad = self.main_program.global_block().var('x@GRAD')
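# A rough sketch (plain Python; the behavior described is an assumed reading
# of shrink_memory, not code from this test) of how each step shrinks the
# batch: with the rank table's lengths sorted in descending order, step i
# keeps only the rows belonging to sequences longer than i timesteps.
def _rows_kept(sorted_lengths, i):
    return sum(1 for length in sorted_lengths if length > i)

# E.g. for sequence lengths [5, 1, 1]:
assert _rows_kept([5, 1, 1], 0) == 3  # all sequences active at step 0
assert _rows_kept([5, 1, 1], 1) == 1  # only the length-5 sequence remains
assert _rows_kept([5, 1, 1], 2) == 1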