def compare_ifelse_op_and_numpy(self, place):
    self.set_test_case()

    prog = Program()
    startup_prog = Program()
    with program_guard(prog, startup_prog):
        src = layers.data(name='data', shape=[1], dtype='float32')
        cond = layers.fill_constant(
            [1], dtype='float32', value=self.cond_value)
        ifcond = layers.less_than(x=src, y=cond)

        ie = layers.IfElse(ifcond)
        with ie.true_block():
            true_target = ie.input(src)
            true_target = fluid.layers.exp(true_target)
            ie.output(true_target)

        with ie.false_block():
            false_target = ie.input(src)
            false_target = fluid.layers.tanh(false_target)
            ie.output(false_target)

        if_out = ie()
        out = layers.reduce_sum(if_out)

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        o1, = exe.run(fluid.default_main_program(),
                      feed={'data': self.data},
                      fetch_list=[out])
        o2 = self.numpy_cal()

        self.assertTrue(
            np.allclose(o1, o2, atol=1e-8),
            "IfElse result : " + str(o1) + "\n Numpy result :" + str(o2))
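# For reference, a minimal sketch of what the numpy baseline (self.numpy_cal,
# defined elsewhere in this suite) presumably computes: exp() on the entries
# below cond_value, tanh() on the rest, then summed. The helper name below is
# illustrative only and is not part of the original test class.
def numpy_cal_sketch(self):
    below = self.data[self.data < self.cond_value]
    rest = self.data[self.data >= self.cond_value]
    return np.sum(np.exp(below)) + np.sum(np.tanh(rest))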
def test_ifelse(self):
    prog = Program()
    startup_prog = Program()
    with program_guard(prog, startup_prog):
        image = layers.data(name='x', shape=[784], dtype='float32')
        label = layers.data(name='y', shape=[1], dtype='int64')

        limit = layers.fill_constant_batch_size_like(
            input=label, dtype='int64', shape=[1], value=5.0)
        cond = layers.less_than(x=label, y=limit)

        ie = layers.IfElse(cond)
        with ie.true_block():
            true_image = ie.input(image)
            hidden = layers.fc(input=true_image, size=100, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            ie.output(prob)

        with ie.false_block():
            false_image = ie.input(image)
            hidden = layers.fc(input=false_image, size=200, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            ie.output(prob)

        prob = ie()
        loss = layers.cross_entropy(input=prob[0], label=label)
        avg_loss = layers.mean(loss)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, startup_prog)

        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)
        exe.run(startup_prog)

        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        # Training never brought avg_loss below 1.0 within PASS_NUM passes.
        self.assertFalse(True)
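# Note: IfElse routes each row of the batch by the boolean condition, so in the
# test above samples with label < 5 are expected to pass through the 100-unit
# branch and the remaining samples through the 200-unit branch; ie() then
# merges the two partial outputs back into a single batch.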
def test_input_type_error(self):
    main_program = Program()
    startup_program = Program()
    with program_guard(main_program, startup_program):
        src = layers.data(name='data', shape=[1], dtype='float32')
        const_value = layers.fill_constant(
            [1], dtype='float32', value=123.0)
        ifcond = layers.less_than(x=src, y=const_value)

        # The condition passed to IfElse must be a Variable.
        with self.assertRaises(TypeError):
            ie = layers.IfElse(set())

        # A non-string second (name) argument is rejected as well.
        with self.assertRaises(TypeError):
            ie = layers.IfElse(ifcond, set())

        # Outputs of a block must be Variables; an empty list is rejected.
        with self.assertRaises(TypeError):
            ie = layers.IfElse(ifcond)
            with ie.true_block():
                true_target = ie.input(src)
                true_target = fluid.layers.exp(true_target)
                ie.output([])
def check_network_convergence(self,
                              use_cuda=True,
                              use_mem_opt=False,
                              iter_num=5):
    prog = Program()
    startup_prog = Program()
    prog.random_seed = 100
    startup_prog.random_seed = 100

    with program_guard(prog, startup_prog):
        image = layers.data(name='x', shape=[784], dtype='float32')
        label = layers.data(name='y', shape=[1], dtype='int64')

        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
        cond = layers.less_than(x=label, y=limit)

        ie = layers.IfElse(cond)
        with ie.true_block():
            true_image = ie.input(image)
            hidden = layers.fc(input=true_image, size=100, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            ie.output(prob)

        with ie.false_block():
            false_image = ie.input(image)
            hidden = layers.fc(input=false_image, size=200, act='tanh')
            prob = layers.fc(input=hidden, size=10, act='softmax')
            ie.output(prob)

        prob = ie()
        loss = layers.cross_entropy(input=prob[0], label=label)
        avg_loss = layers.mean(loss)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, startup_prog)

        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=200)

        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = Executor(place)

        exec_strategy = fluid.ExecutionStrategy()
        exec_strategy.use_cuda = use_cuda

        build_strategy = fluid.BuildStrategy()
        build_strategy.memory_optimize = use_mem_opt

        train_cp = compiler.CompiledProgram(fluid.default_main_program())
        train_cp = train_cp.with_data_parallel(
            loss_name=avg_loss.name,
            exec_strategy=exec_strategy,
            build_strategy=build_strategy)
        fetch_list = [avg_loss.name]

        exe.run(startup_prog)

        PASS_NUM = 100
        loop = 0
        ret = []
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(train_cp,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=fetch_list)
                loop += 1
                ret.append(outs[0])
                if iter_num == loop:
                    return ret
    return ret
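# A minimal usage sketch, assuming the class exposes check_network_convergence
# as above. The method name and assertions here are illustrative, not part of
# the original suite: with identical random seeds, enabling memory_optimize
# should not change the per-iteration losses.
def test_ifelse_convergence_sketch(self):
    ret_plain = self.check_network_convergence(use_cuda=False, use_mem_opt=False)
    ret_memopt = self.check_network_convergence(use_cuda=False, use_mem_opt=True)
    for loss_a, loss_b in zip(ret_plain, ret_memopt):
        self.assertTrue(np.allclose(loss_a, loss_b))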