def test_forward(self):
    """Run a ConditionalBlock forward, then exercise its backward pass.

    Builds a tiny net (one fc layer inside a conditional block), runs it
    on CPU, appends backward ops, and fetches the gradient of the input.
    """
    # Input variable; gradients must be able to flow back into it.
    x_var = layers.data(name='X', shape=[1], dtype='float32')
    x_var.stop_gradient = False

    cond = layers.ConditionalBlock(inputs=[x_var])
    result = layers.create_tensor(dtype='float32')
    with cond.block():
        # Copy the fc output into `result` so it is visible outside the
        # conditional block's scope.
        projected = layers.fc(input=x_var, size=10)
        layers.assign(projected, result)

    executor = Executor(core.CPUPlace())
    executor.run(default_startup_program())

    feed_value = numpy.random.random(size=(10, 1)).astype('float32')

    # Forward pass.
    forward_out = executor.run(feed={'X': feed_value},
                               fetch_list=[result])[0]
    print(forward_out)

    # Backward pass: mean loss, then fetch d(loss)/d(X).
    loss = layers.mean(x=result)
    append_backward_ops(loss=loss)
    grad_var = default_main_program().block(0).var(x_var.name + "@GRAD")
    grad_out = executor.run(feed={'X': feed_value},
                            fetch_list=[grad_var])[0]
    print(grad_out)
def test_forward(self):
    """Forward/backward smoke test for ConditionalBlock on CPU."""
    src = layers.data(name='X', shape=[1], dtype='float32')
    src.stop_gradient = False  # we fetch src's gradient at the end

    cond_block = layers.ConditionalBlock(inputs=[src])
    sink = layers.create_tensor(dtype='float32')
    with cond_block.block():
        # Route the fc result out of the conditional scope via `sink`.
        layers.assign(layers.fc(input=src, size=10), sink)

    place = core.CPUPlace()
    runner = Executor(place)
    runner.run(default_startup_program())

    sample = numpy.random.random(size=(10, 1)).astype('float32')

    # Forward: fetch the tensor written inside the conditional block.
    fetched = runner.run(feed={'X': sample}, fetch_list=[sink])[0]
    print(fetched)

    # Backward: mean loss, then fetch the gradient w.r.t. the input.
    append_backward_ops(loss=layers.mean(x=sink))
    fetched = runner.run(
        feed={'X': sample},
        fetch_list=[
            default_main_program().block(0).var(src.name + "@GRAD")
        ])[0]
    print(fetched)
def main():
    """Smoke-train the WMT14 encoder-decoder for a handful of batches."""
    rnn_out = encoder_decoder()
    next_word = layers.data(
        name="target_language_next_word",
        shape=[1],
        dtype='int64',
        lod_level=1)
    avg_cost = fluid.layers.mean(
        x=layers.cross_entropy(input=rnn_out, label=next_word))
    fluid.optimizer.Adagrad(learning_rate=1e-4).minimize(avg_cost)

    reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.wmt14.train(dict_size), buf_size=1000),
        batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(2):
        for batch in reader():
            # Sample columns: source ids, target words, next target words.
            src_ids = to_lodtensor([s[0] for s in batch], place)
            trg_ids = to_lodtensor([s[1] for s in batch], place)
            trg_next_ids = to_lodtensor([s[2] for s in batch], place)
            fetched = exe.run(
                framework.default_main_program(),
                feed={
                    'src_word_id': src_ids,
                    'target_language_word': trg_ids,
                    'target_language_next_word': trg_next_ids
                },
                fetch_list=[avg_cost])
            avg_cost_val = np.array(fetched[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            # Only a few batches are run; this is a smoke test, not training.
            if batch_id > 3:
                exit(0)
            batch_id += 1
def main():
    """Train the WMT14 translation model briefly, then exit."""
    prediction = encoder_decoder()
    gold = layers.data(
        name="target_language_next_word",
        shape=[1],
        dtype='int64',
        lod_level=1)
    xent = layers.cross_entropy(input=prediction, label=gold)
    mean_cost = fluid.layers.mean(x=xent)

    adagrad = fluid.optimizer.Adagrad(learning_rate=1e-4)
    adagrad.minimize(mean_cost)

    shuffled = paddle.reader.shuffle(
        paddle.dataset.wmt14.train(dict_size), buf_size=1000)
    batched_reader = paddle.batch(shuffled, batch_size=batch_size)

    cpu = core.CPUPlace()
    executor = Executor(cpu)
    executor.run(framework.default_startup_program())

    seen = 0  # number of mini-batches processed so far
    for epoch in xrange(2):
        for minibatch in batched_reader():
            # Each row is (source ids, target words, next target words).
            feed_dict = {
                'src_word_id': to_lodtensor(
                    [row[0] for row in minibatch], cpu),
                'target_language_word': to_lodtensor(
                    [row[1] for row in minibatch], cpu),
                'target_language_next_word': to_lodtensor(
                    [row[2] for row in minibatch], cpu),
            }
            results = executor.run(
                framework.default_main_program(),
                feed=feed_dict,
                fetch_list=[mean_cost])
            cost_value = np.array(results[0])
            print('pass_id=' + str(epoch) + ' batch=' + str(seen) +
                  " avg_cost=" + str(cost_value))
            # Stop early: this script only verifies the loop runs.
            if seen > 3:
                exit(0)
            seen += 1
seed=SEED))
# NOTE(review): the line above is the tail of a call started before this
# chunk — presumably the layer producing `predict`. Do not edit in isolation.

# Loss: cross-entropy between the network output and the label, averaged
# over the batch.
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)

# Adam with the conventional default moment decay rates.
optimizer = AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
opts = optimizer.minimize(avg_cost)

accuracy = evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(
    paddle.dataset.mnist.train(), batch_size=BATCH_SIZE)

place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    # time.clock(): per-pass timing marker (CPU time on Unix; deprecated
    # in Python 3 — NOTE(review): consider time.time() if ported).
    pass_start = time.clock()
    for batch_id, data in enumerate(train_reader()):
        # Each sample is (image, label); images reshaped to 1x28x28
        # (channel, height, width) — MNIST grayscale.
        img_data = np.array(
            map(lambda x: x[0].reshape([1, 28, 28]), data)).astype(DTYPE)
        y_data = np.array(map(lambda x: x[1], data)).astype("int64")
        y_data = y_data.reshape([len(y_data), 1])  # column vector for feed
        tensor_img = core.LoDTensor()
        tensor_y = core.LoDTensor()
        tensor_img.set(img_data, place)
        tensor_y.set(y_data, place)
BATCH_SIZE = 16
PASS_NUM = 1

# CIFAR-10 training reader: shuffle within a 1280-sample buffer, then batch.
train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.cifar.train10(), buf_size=128 * 10),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
# `images` and `label` are data layers defined earlier in this file.
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())

step = 0
sample_num = 0

start_up_program = framework.default_startup_program()
# Grab the parameter variable so its value can be fetched while training.
param1_var = start_up_program.global_block().var("param1")

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    for data in train_reader():
        # NOTE(review): unpacking into four names assumes
        # `accuracy.metrics` holds exactly one variable — confirm.
        loss, conv1_out, param1, acc = exe.run(
            fluid.default_main_program(),
            feed=feeder.feed(data),
            fetch_list=[avg_cost, conv1, param1_var] + accuracy.metrics)
        pass_acc = accuracy.eval(exe)

        # all code below is for VisualDL
        # start picking sample from beginning
def main():
    """Train the movielens recommender until the cost drops below 6.0."""
    cost = model()
    SGDOptimizer(learning_rate=0.2).minimize(cost)

    place = core.GPUPlace(0) if USE_GPU else core.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)

    # Column index of each feature inside a movielens sample.
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
        """Convert one mini-batch into a {name: LoDTensor} feed dict."""
        feed_tensors = {}
        for key, idx in feeding.iteritems():
            tensor = core.LoDTensor()
            if key in ("category_id", "movie_title"):
                # Variable-length feature: concatenate per-sample sequences
                # and record their cumulative offsets as LoD info.
                seqs = [np.array(row[idx]).astype("int64") for row in data]
                lod = [0]
                for seq in seqs:
                    lod.append(lod[-1] + len(seq))
                values = np.concatenate(seqs, axis=0)
                tensor.set_lod([lod])
            else:
                # Dense scalar feature; only the rating is a float.
                dtype = "float32" if key == "score" else "int64"
                values = np.array([row[idx] for row in data]).astype(dtype)
            tensor.set(values.reshape([values.shape[0], 1]), place)
            feed_tensors[key] = tensor
        return feed_tensors

    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for data in reader():
            fetched = exe.run(
                framework.default_main_program(),
                feed=func_feed(feeding, data),
                fetch_list=[cost])
            if np.array(fetched[0])[0] < 6.0:
                # if avg cost less than 6.0, we think our code is good.
                exit(0)
def main():
    """Movielens training loop; exits once the fetched cost is below 6.0."""
    cost = model()
    trainer = SGDOptimizer(learning_rate=0.2)
    trainer.minimize(cost)

    if USE_GPU:
        device = core.GPUPlace(0)
    else:
        device = core.CPUPlace()
    executor = Executor(device)
    executor.run(framework.default_startup_program())

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.movielens.train(), buf_size=8192),
        batch_size=BATCH_SIZE)

    # Maps each input name to its column in a movielens record.
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
        """Build the LoDTensor feed dict for one mini-batch."""
        tensors = {}
        for key, col in feeding.iteritems():
            holder = core.LoDTensor()
            is_sequence = key == "category_id" or key == "movie_title"
            if not is_sequence:
                # Dense scalar feature; the rating is the only float input.
                if key == "score":
                    flat = np.array([r[col] for r in data]).astype("float32")
                else:
                    flat = np.array([r[col] for r in data]).astype("int64")
            else:
                # Ragged feature: flatten the per-sample sequences and keep
                # their cumulative offsets as LoD metadata.
                parts = [np.array(r[col]).astype("int64") for r in data]
                offsets = [0]
                total = 0
                for part in parts:
                    total += len(part)
                    offsets.append(total)
                flat = np.concatenate(parts, axis=0)
                holder.set_lod([offsets])
            flat = flat.reshape([flat.shape[0], 1])
            holder.set(flat, device)
            tensors[key] = holder
        return tensors

    PASS_NUM = 100
    for pass_id in range(PASS_NUM):
        for batch in train_reader():
            results = executor.run(
                framework.default_main_program(),
                feed=func_feed(feeding, batch),
                fetch_list=[cost])
            fetched_cost = np.array(results[0])
            if fetched_cost[0] < 6.0:
                # if avg cost less than 6.0, we think our code is good.
                exit(0)
import unittest
import paddle.v2.fluid.core as core
from paddle.v2.fluid.framework import Program, default_startup_program

# Shared program used by tests that only need some program to mutate.
main_program = default_startup_program()


class TestOperator(unittest.TestCase):
    def test_error_type(self):
        """append_op must reject a missing `type` and an unknown op name."""
        block = main_program.create_block()
        try:
            block.append_op()
            # NOTE(review): `assertFail` is not a unittest.TestCase method;
            # this likely should be `self.fail()`. Left as-is here — if the
            # expected ValueError is raised, this line is never reached.
            self.assertFail()
        except ValueError as v_err:
            self.assertEqual(
                v_err.message,
                "`type` to initilized an Operator can not be None.")
        try:
            block.append_op(type="no_such_op")
            self.assertFail()
        except ValueError as a_err:
            self.assertEqual(
                a_err.message,
                "Operator \"no_such_op\" has not been registered.")

    def test_op_desc_creation(self):
        program = Program()
        block = program.current_block()
        # NOTE(review): this method is truncated in the visible chunk — the
        # create_var call below continues past this excerpt.
        mul_x = block.create_var(dtype="float32",
def main():
    """Train the rating model, periodically logging cost and fetches."""
    cost, u, m = model()
    optimizer = SGD(learning_rate=0.2)
    # Alternative optimizer kept for experimentation:
    # optimizer = Adam(learning_rate=1e-4)
    optimizer.minimize(cost)

    place = core.GPUPlace(0) if USE_GPU else core.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    # Debug aid: inspect the embedding temp variable.
    # print framework.default_main_program().block(0).vars.keys()
    print(framework.default_main_program().block(0).vars.get(
        "embedding_0.tmp_0"))

    reader = paddle.batch(
        paddle.dataset.movielens.train(), batch_size=BATCH_SIZE)

    # Column index per feature; 'category_id' is intentionally disabled.
    feeding = {
        'user_id': 0,
        'gender_id': 1,
        'age_id': 2,
        'job_id': 3,
        'movie_id': 4,
        # 'category_id': 5,
        'movie_title': 6,
        'score': 7
    }

    def func_feed(feeding, data):
        """Turn a mini-batch into a {name: LoDTensor} feed dict."""
        feed_tensors = {}
        for key, idx in feeding.iteritems():
            tensor = core.LoDTensor()
            if key == "category_id" or key == "movie_title":
                # Sequence feature: flatten rows and record LoD offsets.
                rows = [np.array(sample[idx]).astype("int64")
                        for sample in data]
                lod = [0]
                for row in rows:
                    lod.append(lod[-1] + len(row))
                values = np.concatenate(rows, axis=0)
                tensor.set_lod([lod])
            else:
                dtype = "float32" if key == "score" else "int64"
                values = np.array(
                    [sample[idx] for sample in data]).astype(dtype)
            tensor.set(values.reshape([values.shape[0], 1]), place)
            feed_tensors[key] = tensor
        return feed_tensors

    PASS_NUM = 5
    for pass_id in range(PASS_NUM):
        batch_id = 0
        ts = time.time()
        for data in reader():
            outs = exe.run(
                framework.default_main_program(),
                feed=func_feed(feeding, data),
                fetch_list=[cost, u, m])
            out = np.array(outs[0])
            if batch_id % 100 == 0:
                print("pass %d, batch %d, cost: %f" %
                      (pass_id, batch_id, out[0]))
                print(outs[1])
                print(outs[2])
            batch_id += 1
            # if out[0] < 6.0:
            #     # if avg cost less than 6.0, we think our code is good.
            #     exit(0)
        print("pass %d, cost: %f, time: %f" %
              (pass_id, out[0], time.time() - ts))