def not_test_raw_api(self):
    prog = Program()
    startup_prog = Program()
    with program_guard(prog, startup_prog):
        image = layers.data(name='x', shape=[784], dtype='float32')
        label = layers.data(name='y', shape=[1], dtype='int64')

        # Route each sample to the true or false branch depending on
        # whether its label is below the limit.
        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
        cond = layers.less_than(x=label, y=limit)
        true_image, false_image = split_lod_tensor(input=image, mask=cond)

        true_out = layers.create_tensor(dtype='float32')
        true_cond = ConditionalBlock([cond])
        with true_cond.block():
            hidden = layers.fc(input=true_image, size=100, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            layers.assign(input=prob, output=true_out)

        false_out = layers.create_tensor(dtype='float32')
        false_cond = ConditionalBlock([cond])
        with false_cond.block():
            hidden = layers.fc(input=false_image, size=200, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            layers.assign(input=prob, output=false_out)

        # Stitch the two branch outputs back into a single batch.
        prob = merge_lod_tensor(
            in_true=true_out, in_false=false_out, mask=cond, x=image)
        loss = layers.cross_entropy(input=prob, label=label)
        avg_loss = layers.mean(loss)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, startup_prog)

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
        batch_size=10)

    place = core.CPUPlace()
    exe = Executor(place)
    exe.run(startup_prog)

    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for data in train_reader():
            x_data = np.array([x[0] for x in data]).astype("float32")
            y_data = np.array([x[1] for x in data]).astype("int64")
            y_data = np.expand_dims(y_data, axis=1)

            outs = exe.run(prog,
                           feed={'x': x_data,
                                 'y': y_data},
                           fetch_list=[avg_loss])
            print(outs[0])
            if outs[0] < 1.0:
                # Loss dropped below the threshold; the test passes.
                return
    self.fail('average loss never dropped below 1.0')
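
# To make the graph above easier to follow: split_lod_tensor routes each
# sequence to the true or false branch according to its mask entry, and
# merge_lod_tensor scatters the branch results back into the original order.
# A minimal NumPy sketch of that routing idea, ignoring LoD levels (the
# helpers np_split/np_merge are illustrative names, not Paddle APIs):


def np_split(rows, mask):
    """Partition rows by a boolean mask into (true_rows, false_rows)."""
    import numpy as np
    mask = np.asarray(mask, dtype=bool)
    return rows[mask], rows[~mask]


def np_merge(true_rows, false_rows, mask):
    """Inverse of np_split: scatter branch rows back to their source slots."""
    import numpy as np
    mask = np.asarray(mask, dtype=bool)
    out = np.empty((mask.size,) + true_rows.shape[1:], dtype=true_rows.dtype)
    out[mask] = true_rows
    out[~mask] = false_rows
    return out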

def test_xfalse():
    # Should raise a TypeError: in_false must be a Variable, not a set.
    out = merge_lod_tensor(
        in_true=x_true, in_false=set(), x=input_data, mask=y, level=level)

def test_x():
    # Should raise a TypeError: x must be a Variable, not a set.
    out = merge_lod_tensor(
        in_true=x_true, in_false=x_false, x=set(), mask=y, level=level)
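
# For contrast with the two failure cases above, a well-formed call passes a
# Variable for every tensor argument. A sketch, assuming x_true, x_false,
# input_data and y are Variables created with layers.data as in the
# surrounding tests:


def valid_call_sketch():
    out = merge_lod_tensor(
        in_true=x_true, in_false=x_false, x=input_data, mask=y, level=level)
    return out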

def test_grad(self):
    place = core.CPUPlace()
    program = Program()
    with program_guard(program):
        x = layers.data(
            name='x', shape=[1], dtype='float32', stop_gradient=False)
        y = layers.data(
            name='y', shape=[1], dtype='bool', stop_gradient=False)

        level = 0

        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
        out = merge_lod_tensor(
            in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
        mean = layers.mean(out)

        append_backward(mean)

    # Ten rows grouped into three sequences of lengths 3, 6 and 1; at
    # level 0 the mask selects whole sequences.
    tensor = core.LoDTensor()
    tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
    tensor.set_recursive_sequence_lengths([[3, 6, 1]])

    mask_np = np.array([0, 1, 0]).astype('bool')
    mask_np = np.expand_dims(mask_np, axis=1)

    mask = core.LoDTensor()
    mask.set(mask_np, place)

    exe = Executor(place)
    scope = core.Scope()

    g_vars = program.global_block().var(x.name + "@GRAD")
    g_out = [
        item.sum()
        for item in map(np.array,
                        exe.run(program,
                                feed={'x': tensor,
                                      'y': mask},
                                fetch_list=[g_vars],
                                scope=scope,
                                return_numpy=False))
    ]

    g_out_sum = np.array(g_out).sum()

    self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
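
# Why test_grad expects the gradient sum to be 1.0: split and merge only
# permute rows, and mean(out) contributes a gradient of 1/n to each of the
# n rows of x, so the gradients sum to n * (1/n) = 1. A standalone NumPy
# sketch of that arithmetic (it does not exercise the Paddle ops):


def grad_sum_sketch(n=10):
    import numpy as np
    grad_per_row = np.full((n, 1), 1.0 / n)  # d mean / d x_i = 1/n
    assert abs(grad_per_row.sum() - 1.0) < 1e-6
    return grad_per_row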

def main(self, tensor, mask, expect_true, expect_false, expect_out,
         level=0):
    place = self.place()
    program = Program()
    with program_guard(program):
        x = layers.data(name='x', shape=[1])
        x.persistable = True

        y = layers.data(name='y', shape=[1])
        y.persistable = True

        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
        out_true.persistable = True
        out_false.persistable = True

        out = merge_lod_tensor(
            in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
        out.persistable = True

    exe = Executor(place)
    scope = core.Scope()
    exe.run(program,
            feed={'x': tensor,
                  'y': mask},
            scope=scope,
            return_numpy=False)

    var_true = scope.find_var(out_true.name).get_tensor()
    var_false = scope.find_var(out_false.name).get_tensor()
    var_out = scope.find_var(out.name).get_tensor()

    self.check_tensor_same(var_true, expect_true)
    self.check_tensor_same(var_false, expect_false)
    self.check_tensor_same(var_out, expect_out)
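
# A worked example of what the harness checks, reusing the data from
# test_grad: rows 0..9 grouped into sequences of lengths [3, 6, 1] with mask
# [0, 1, 0]. At level 0 the mask selects whole sequences, so the middle
# sequence (rows 3..8) should land in the true branch, the other two in the
# false branch, and the merge should restore the original order. A hedged
# NumPy sketch of those expected tensors (computed by hand, not by Paddle):


def expected_tensors_sketch():
    import numpy as np
    rows = np.arange(10).reshape(10, 1).astype('float32')
    expect_true = rows[3:9]                              # masked-in sequence
    expect_false = np.concatenate([rows[:3], rows[9:]])  # remaining sequences
    expect_out = rows                                    # merge undoes split
    return expect_true, expect_false, expect_out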

def main(self,
         tensor,
         mask,
         expect_true,
         expect_false,
         expect_out,
         level=0,
         use_merge_lod_infer=False):
    place = self.place()
    program = Program()
    with program_guard(program):
        x = layers.data(name='x', shape=[1])
        x.persistable = True

        y = layers.data(name='y', shape=[1])
        y.persistable = True

        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
        out_true.persistable = True
        out_false.persistable = True

        if use_merge_lod_infer:
            # Build the inference-only merge op directly via LayerHelper.
            helper = LayerHelper('merge_lod_tensor_infer')
            out = helper.create_variable_for_type_inference(
                dtype=out_true.dtype)
            helper.append_op(
                type='merge_lod_tensor_infer',
                inputs={
                    'X': x,
                    'Mask': y,
                    'InTrue': out_true,
                    'InFalse': out_false
                },
                outputs={'Out': out},
                attrs={'level': level})
            out.persistable = True
        else:
            out = merge_lod_tensor(
                in_true=out_true,
                in_false=out_false,
                mask=y,
                x=x,
                level=level)
            out.persistable = True

    exe = Executor(place)
    scope = core.Scope()
    exe.run(program,
            feed={'x': tensor,
                  'y': mask},
            scope=scope,
            return_numpy=False)

    var_true = scope.find_var(out_true.name).get_tensor()
    var_false = scope.find_var(out_false.name).get_tensor()
    var_out = scope.find_var(out.name).get_tensor()

    # Only the merged output is checked for the infer-only path.
    if not use_merge_lod_infer:
        self.check_tensor_same(var_true, expect_true)
        self.check_tensor_same(var_false, expect_false)
    self.check_tensor_same(var_out, expect_out)
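
# Usage sketch for the infer-only path; the concrete tensor, mask and
# expected arrays are assumptions here (any case accepted by the harness
# above would do, e.g. the hand-computed values from
# expected_tensors_sketch):
#
#   self.main(tensor, mask, expect_true, expect_false, expect_out,
#             use_merge_lod_infer=True)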