def test_main(self):
    N = 10

    img_expected_res = []
    lbl_expected_res = []
    # First pass: read the raw recordio data and apply the transformation
    # (img / 2, lbl + 1) in Python to build the expected results.
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data_file = open_recordio_file(
            './mnist_for_preprocessor_test.recordio',
            shapes=[[-1, 784], [-1, 1]],
            lod_levels=[0, 0],
            dtypes=['float32', 'int64'])
        img, lbl = fluid.layers.io.read_file(data_file)

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        for _ in range(N):
            img_v, lbl_v = exe.run(fetch_list=[img, lbl])
            img_expected_res.append(img_v / 2)
            lbl_expected_res.append(lbl_v + 1)

    img_actual_res = []
    lbl_actual_res = []
    # Second pass: apply the same transformation inside a Preprocessor block
    # so it runs as part of the reader pipeline.
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data_file = open_recordio_file(
            './mnist_for_preprocessor_test.recordio',
            shapes=[[-1, 784], [-1, 1]],
            lod_levels=[0, 0],
            dtypes=['float32', 'int64'])
        preprocessor = fluid.layers.io.Preprocessor(reader=data_file)
        with preprocessor.block():
            img, lbl = preprocessor.inputs()
            img_out = img / 2
            lbl_out = lbl + 1
            preprocessor.outputs(img_out, lbl_out)

        data_file = fluid.layers.io.double_buffer(preprocessor())
        img, lbl = fluid.layers.io.read_file(data_file)

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        for _ in range(N):
            img_v, lbl_v = exe.run(fetch_list=[img, lbl])
            img_actual_res.append(img_v)
            lbl_actual_res.append(lbl_v)

    # np.allclose returns a bool; assert on it so a mismatch actually fails the test.
    for idx in range(N):
        self.assertTrue(np.allclose(img_expected_res[idx], img_actual_res[idx]))
        self.assertTrue(np.allclose(lbl_expected_res[idx], lbl_actual_res[idx]))
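# The test above assumes './mnist_for_preprocessor_test.recordio' already exists
# on disk. Below is a minimal sketch of a setUp() that could generate such a file
# with the legacy fluid RecordIO writer; the batch size of 32 and the use of
# paddle.dataset.mnist are illustrative assumptions, not taken from the test itself.
def setUp(self):
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=32)
        feeder = fluid.DataFeeder(
            feed_list=[  # order matches the shapes/dtypes declared in the test
                fluid.layers.data(name='image', shape=[784]),
                fluid.layers.data(name='label', shape=[1], dtype='int64'),
            ],
            place=fluid.CPUPlace())
        fluid.recordio_writer.convert_reader_to_recordio_file(
            './mnist_for_preprocessor_test.recordio', reader, feeder)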
def test_main(self):
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data_file = open_recordio_file(
            filename='./mnist.recordio',
            shapes=[(-1, 784), (-1, 1)],
            lod_levels=[0, 0],
            dtypes=['float32', 'int64'],
            pass_num=self.pass_num)
        img, label = fluid.layers.read_file(data_file)

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        batch_count = 0
        while True:
            try:
                img_val, = exe.run(fetch_list=[img])
            except fluid.core.EOFException:
                break
            batch_count += 1
            self.assertLessEqual(img_val.shape[0], self.batch_size)
        self.assertEqual(batch_count, self.num_batch * self.pass_num)
def test_main(self, decorator_callback=None):
    # use new program
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data_file = open_recordio_file(
            './mnist.recordio',
            shapes=[[-1, 784], [-1, 1]],
            lod_levels=[0, 0],
            dtypes=['float32', 'int64'])
        if decorator_callback is not None:
            data_file = decorator_callback(data_file)
        img, label = fluid.layers.read_file(data_file)

        hidden = fluid.layers.fc(input=img, size=100, act='tanh')
        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = fluid.layers.mean(loss)

        fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss)

        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()

        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        avg_loss_np = []

        # train a pass
        batch_id = 0
        while True:
            try:
                tmp, = exe.run(fetch_list=[avg_loss])
            except fluid.core.EOFException:
                break
            avg_loss_np.append(tmp)
            batch_id += 1
        self.assertEqual(batch_id, self.num_batches)
        self.assertLess(avg_loss_np[-1], avg_loss_np[0])
def transformer(src_vocab_size,
                trg_vocab_size,
                max_length,
                n_layer,
                n_head,
                d_key,
                d_value,
                d_model,
                d_inner_hid,
                dropout_rate,
                src_pad_idx,
                trg_pad_idx,
                pos_pad_idx):
    file_obj = open_recordio_file(
        filename=os.environ.get('RECORDIO_FILENAME', '/tmp/wmt16.recordio'),
        shapes=[
            [batch_size * max_length, 1],
            [batch_size * max_length, 1],
            [batch_size * max_length, 1],
            [batch_size * max_length, 1],
            [batch_size, n_head, max_length, max_length],
            [batch_size, n_head, max_length, max_length],
            [batch_size, n_head, max_length, max_length],
            [batch_size * max_length, 1],
            [batch_size * max_length, 1],
        ],
        dtypes=[
            'int64', 'int64', 'int64', 'int64', 'float32', 'float32',
            'float32', 'int64', 'float32'
        ],
        lod_levels=[0] * 9)

    src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, trg_slf_attn_bias, \
        trg_src_attn_bias, gold, weights = fluid.layers.read_file(file_obj)

    enc_input = prepare_encoder(src_word, src_pos, src_vocab_size, d_model,
                                src_pad_idx, max_length, dropout_rate)
    enc_output = encoder(enc_input, src_slf_attn_bias, n_layer, n_head, d_key,
                         d_value, d_model, d_inner_hid, dropout_rate)

    dec_input = prepare_decoder(trg_word, trg_pos, trg_vocab_size, d_model,
                                trg_pad_idx, max_length, dropout_rate)
    dec_output = decoder(dec_input, enc_output, trg_slf_attn_bias,
                         trg_src_attn_bias, n_layer, n_head, d_key, d_value,
                         d_model, d_inner_hid, dropout_rate)

    # TODO(guosheng): Share the weight matrix between the embedding layers and
    # the pre-softmax linear transformation.
    predict = layers.reshape(
        x=layers.fc(input=dec_output,
                    size=trg_vocab_size,
                    param_attr=fluid.initializer.Xavier(uniform=False),
                    bias_attr=False,
                    num_flatten_dims=2),
        shape=[-1, trg_vocab_size],
        act="softmax")

    cost = layers.cross_entropy(input=predict, label=gold)
    weighted_cost = cost * weights
    return layers.reduce_sum(weighted_cost)
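# A minimal sketch of how the summed cost returned by transformer() might be
# driven for training, reusing the run-until-EOFException pattern from the reader
# tests above. All hyperparameter values below (vocabulary sizes, layer counts,
# learning rate, etc.) are illustrative assumptions, not taken from the source.
def train_transformer():
    avg_cost = transformer(
        src_vocab_size=10000,
        trg_vocab_size=10000,
        max_length=50,
        n_layer=6,
        n_head=8,
        d_key=64,
        d_value=64,
        d_model=512,
        d_inner_hid=1024,
        dropout_rate=0.1,
        src_pad_idx=0,
        trg_pad_idx=0,
        pos_pad_idx=0)
    fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_cost)

    place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda() \
        else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # One pass over the RecordIO file; the reader raises EOFException when exhausted.
    costs = []
    while True:
        try:
            cost_val, = exe.run(fetch_list=[avg_cost])
        except fluid.core.EOFException:
            break
        costs.append(cost_val)
    return costs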