def beam_decode():

    mylog("Reading Data...")

    from_test = None

    from_vocab_path, to_vocab_path, real_vocab_size_from, real_vocab_size_to = data_utils.get_vocab_info(
        FLAGS.data_cache_dir)

    FLAGS._buckets = _buckets
    FLAGS._beam_buckets = _beam_buckets
    FLAGS.real_vocab_size_from = real_vocab_size_from
    FLAGS.real_vocab_size_to = real_vocab_size_to

    from_test = data_utils.prepare_test_data(FLAGS.data_cache_dir,
                                             FLAGS.test_path_from,
                                             from_vocab_path)

    test_data_bucket, test_data_order = read_data_test(from_test)
    test_bucket_sizes = [
        len(test_data_bucket[b]) for b in xrange(len(_beam_buckets))
    ]
    test_total_size = int(sum(test_bucket_sizes))

    # reports
    mylog("from_vocab_size: {}".format(FLAGS.from_vocab_size))
    mylog("to_vocab_size: {}".format(FLAGS.to_vocab_size))
    mylog("_beam_buckets: {}".format(FLAGS._beam_buckets))
    mylog("BEAM_DECODE:")
    mylog("total: {}".format(test_total_size))
    mylog("buckets: {}".format(test_bucket_sizes))

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = FLAGS.allow_growth

    with tf.Session(config=config) as sess:

        # runtime profile
        if FLAGS.profile:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        mylog("Creating Model")
        model = create_model(sess, run_options, run_metadata)
        show_all_variables()

        # disable dropout at decode time
        sess.run(model.dropoutRate.assign(1.0))

        start_id = 0
        n_steps = 0
        batch_size = FLAGS.batch_size

        dite = DataIterator(model,
                            test_data_bucket,
                            len(_beam_buckets),
                            batch_size,
                            None,
                            data_order=test_data_order)
        ite = dite.next_original()

        i_sent = 0
        targets = []

        for source_inputs, bucket_id, length in ite:

            print("--- decoding {}/{} sent ---".format(i_sent, test_total_size))
            i_sent += 1

            # per-sentence beam state
            results = []  # finished hypotheses as (sentence, score)
            scores = [0.0] * FLAGS.beam_size
            sentences = [[] for x in xrange(FLAGS.beam_size)]
            beam_parent = range(FLAGS.beam_size)

            target_inputs = [data_utils.GO_ID] * FLAGS.beam_size
            min_target_length = int(length * FLAGS.min_ratio) + 1
            max_target_length = int(length * FLAGS.max_ratio) + 1  # include EOS

            for i in xrange(max_target_length):

                if i == 0:
                    top_value, top_index, eos_value = model.beam_step(
                        sess,
                        bucket_id,
                        index=i,
                        sources=source_inputs,
                        target_inputs=target_inputs)
                else:
                    top_value, top_index, eos_value = model.beam_step(
                        sess,
                        bucket_id,
                        index=i,
                        target_inputs=target_inputs,
                        beam_parent=beam_parent)

                # top_value = [array[batch_size, batch_size]]
                # top_index = [array[batch_size, batch_size]]
                # eos_value = [array[batch_size, 1]]

                # expand: collect (score, parent beam, word) candidates
                global_queue = []

                # at step 0 every beam holds the same GO prefix, so expand
                # only one row to avoid duplicate hypotheses
                if i == 0:
                    nrow = 1
                else:
                    nrow = FLAGS.beam_size

                if i == max_target_length - 1:
                    # last step: force every surviving beam to emit EOS
                    for row in xrange(nrow):
                        score = scores[row] + np.log(eos_value[0][row, 0])
                        word_index = data_utils.EOS_ID
                        beam_index = row
                        global_queue.append((score, beam_index, word_index))
                else:
                    for row in xrange(nrow):
                        for col in xrange(top_index[0].shape[1]):
                            score = scores[row] + np.log(top_value[0][row, col])
                            word_index = top_index[0][row, col]
                            beam_index = row
                            global_queue.append((score, beam_index, word_index))

                global_queue = sorted(global_queue, key=lambda x: -x[0])

                if FLAGS.print_beam:
                    print("--------- Step {} --------".format(i))

                # prune: keep the beam_size best non-EOS continuations
                target_inputs = []
                beam_parent = []
                scores = []
                temp_sentences = []

                for j, (score, beam_index, word_index) in enumerate(global_queue):

                    if word_index == data_utils.EOS_ID:
                        # finished hypothesis; drop it if it is too short
                        if len(sentences[beam_index]) + 1 < min_target_length:
                            continue
                        results.append((sentences[beam_index] + [word_index], score))
                        if FLAGS.print_beam:
                            print("*Beam:{} Father:{} word:{} score:{}".format(
                                j, beam_index, word_index, score))
                        continue

                    if FLAGS.print_beam:
                        print("Beam:{} Father:{} word:{} score:{}".format(
                            j, beam_index, word_index, score))

                    beam_parent.append(beam_index)
                    target_inputs.append(word_index)
                    scores.append(score)
                    temp_sentences.append(sentences[beam_index] + [word_index])

                    if len(scores) >= FLAGS.beam_size:
                        break

                # cannot fill beam_size: pad by repeating the last survivor
                while len(scores) < FLAGS.beam_size and i < max_target_length - 1:
                    beam_parent.append(beam_parent[-1])
                    target_inputs.append(target_inputs[-1])
                    scores.append(scores[-1])
                    temp_sentences.append(temp_sentences[-1])

                sentences = temp_sentences

            # keep the 1-best hypothesis for this sentence
            results = sorted(results, key=lambda x: -x[1])
            targets.append(results[0][0])

        data_utils.ids_to_tokens(targets, to_vocab_path, FLAGS.decode_output)
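
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): the expand-and-prune
# step of the beam loop above, isolated into a small helper so the bookkeeping
# is easier to follow. It assumes `np` is the module-level numpy import and
# that `eos_id` plays the role of data_utils.EOS_ID; the helper name and the
# toy numbers below are hypothetical, and the real loop's min/max target-length
# filtering is omitted.
def _beam_expand_prune_sketch(scores, top_value, top_index, beam_size, eos_id):
    """Expand every (beam, candidate) pair into a global queue, then keep the
    beam_size best non-EOS continuations; EOS continuations are returned
    separately as finished hypotheses."""
    queue = []
    for row in range(top_value.shape[0]):
        for col in range(top_value.shape[1]):
            # cumulative log-prob of extending beam `row` with this word
            queue.append((scores[row] + np.log(top_value[row, col]),
                          row, int(top_index[row, col])))
    queue.sort(key=lambda x: -x[0])  # best first

    finished, new_scores, beam_parent, target_inputs = [], [], [], []
    for score, row, word in queue:
        if word == eos_id:
            finished.append((row, score))  # hypothesis ends here
            continue
        new_scores.append(score)
        beam_parent.append(row)      # which beam this continuation extends
        target_inputs.append(word)   # next decoder input for this beam slot
        if len(new_scores) >= beam_size:
            break
    return new_scores, beam_parent, target_inputs, finished

# Toy usage: two beams with cumulative scores [0.0, -0.5], top-2 candidate
# probabilities/ids per beam, EOS id 2:
#   _beam_expand_prune_sketch([0.0, -0.5],
#                             np.array([[0.6, 0.3], [0.7, 0.2]]),
#                             np.array([[5, 2], [2, 7]]),
#                             beam_size=2, eos_id=2)
# keeps words [5, 7] as survivors and returns the two EOS-ending hypotheses
# as finished.
# ---------------------------------------------------------------------------
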
def train():

    # Read Data
    mylog_section("READ DATA")

    from_train = None
    to_train = None
    from_dev = None
    to_dev = None

    from_train, to_train, from_dev, to_dev, _, _ = data_utils.prepare_data(
        FLAGS.data_cache_dir, FLAGS.train_path_from, FLAGS.train_path_to,
        FLAGS.dev_path_from, FLAGS.dev_path_to, FLAGS.from_vocab_size,
        FLAGS.to_vocab_size)

    train_data_bucket = read_data(from_train, to_train)
    dev_data_bucket = read_data(from_dev, to_dev)
    _, _, real_vocab_size_from, real_vocab_size_to = data_utils.get_vocab_info(
        FLAGS.data_cache_dir)

    FLAGS._buckets = _buckets
    FLAGS.real_vocab_size_from = real_vocab_size_from
    FLAGS.real_vocab_size_to = real_vocab_size_to

    # train_n_tokens: total number of target tokens in the training set
    train_n_tokens = np.sum(
        [np.sum([len(items[1]) for items in x]) for x in train_data_bucket])
    train_bucket_sizes = [
        len(train_data_bucket[b]) for b in xrange(len(_buckets))
    ]
    train_total_size = float(sum(train_bucket_sizes))
    train_buckets_scale = [
        sum(train_bucket_sizes[:i + 1]) / train_total_size
        for i in xrange(len(train_bucket_sizes))
    ]
    dev_bucket_sizes = [len(dev_data_bucket[b]) for b in xrange(len(_buckets))]
    dev_total_size = int(sum(dev_bucket_sizes))

    mylog_section("REPORT")

    # steps
    batch_size = FLAGS.batch_size
    n_epoch = FLAGS.n_epoch
    steps_per_epoch = int(train_total_size / batch_size)
    steps_per_dev = int(dev_total_size / batch_size)
    steps_per_checkpoint = int(steps_per_epoch / 2)  # checkpoint twice per epoch
    total_steps = steps_per_epoch * n_epoch

    # reports
    mylog("from_vocab_size: {}".format(FLAGS.from_vocab_size))
    mylog("to_vocab_size: {}".format(FLAGS.to_vocab_size))
    mylog("_buckets: {}".format(FLAGS._buckets))
    mylog("Train:")
    mylog("total: {}".format(train_total_size))
    mylog("bucket sizes: {}".format(train_bucket_sizes))
    mylog("Dev:")
    mylog("total: {}".format(dev_total_size))
    mylog("bucket sizes: {}".format(dev_bucket_sizes))
    mylog("Steps_per_epoch: {}".format(steps_per_epoch))
    mylog("Total_steps: {}".format(total_steps))
    mylog("Steps_per_checkpoint: {}".format(steps_per_checkpoint))

    mylog_section("IN TENSORFLOW")

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = FLAGS.allow_growth

    with tf.Session(config=config) as sess:

        # runtime profile
        if FLAGS.profile:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        mylog_section("MODEL/SUMMARY/WRITER")

        mylog("Creating Model.. (this can take a few minutes)")
        model = create_model(sess, run_options, run_metadata)

        if FLAGS.with_summary:
            mylog("Creating ModelSummary")
            modelSummary = ModelSummary()

            mylog("Creating tf.summary.FileWriter")
            summaryWriter = tf.summary.FileWriter(
                os.path.join(FLAGS.summary_dir, "train.summary"), sess.graph)

        mylog_section("All Variables")
        show_all_variables()

        # Data Iterators
        mylog_section("Data Iterators")

        dite = DataIterator(model, train_data_bucket,
                            len(train_buckets_scale), batch_size,
                            train_buckets_scale)

        iteType = 0
        if iteType == 0:
            mylog("Itetype: withRandom")
            ite = dite.next_random()
        elif iteType == 1:
            mylog("Itetype: withSequence")
            ite = dite.next_sequence()

        # statistics during training
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        low_ppx = float("inf")
        low_ppx_step = 0
        steps_per_report = 30
        n_targets_report = 0
        report_time = 0
        n_valid_sents = 0
        n_valid_words = 0
        patience = FLAGS.patience

        mylog_section("TRAIN")

        while current_step < total_steps:

            # start
            start_time = time.time()

            # data and train
            source_inputs, target_inputs, target_outputs, target_weights, bucket_id = ite.__next__()

            L = model.step(sess, source_inputs, target_inputs, target_outputs,
                           target_weights, bucket_id)

            # loss and time
            step_time += (time.time() - start_time) / steps_per_checkpoint

            loss += L
            current_step += 1
            n_valid_sents += np.sum(np.sign(target_weights[0]))
            n_valid_words += np.sum(target_weights)

            # for report
            report_time += (time.time() - start_time)
            n_targets_report += np.sum(target_weights)

            if current_step % steps_per_report == 0:
                sect_name = "STEP {}".format(current_step)
                msg = "StepTime: {:.2f} sec Speed: {:.2f} targets/s Total_targets: {}".format(
                    report_time / steps_per_report,
                    n_targets_report * 1.0 / report_time, train_n_tokens)
                mylog_line(sect_name, msg)

                report_time = 0
                n_targets_report = 0

                # Create the Timeline object and write it to a json
                if FLAGS.profile:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open('timeline.json', 'w') as f:
                        f.write(ctf)
                    exit()

            if current_step % steps_per_checkpoint == 0:

                i_checkpoint = int(current_step / steps_per_checkpoint)

                # train_ppx: average per-token loss -> perplexity
                loss = loss / n_valid_words
                train_ppx = math.exp(float(loss)) if loss < 300 else float("inf")
                learning_rate = model.learning_rate.eval()

                # dev_ppx
                dev_loss, dev_ppx = evaluate(sess, model, dev_data_bucket)

                # report
                sect_name = "CHECKPOINT {} STEP {}".format(i_checkpoint, current_step)
                msg = "Learning_rate: {:.4f} Dev_ppx: {:.2f} Train_ppx: {:.2f}".format(
                    learning_rate, dev_ppx, train_ppx)
                mylog_line(sect_name, msg)

                if FLAGS.with_summary:
                    # save summary
                    _summaries = modelSummary.step_record(sess, train_ppx, dev_ppx)
                    for _summary in _summaries:
                        summaryWriter.add_summary(_summary, i_checkpoint)

                # save model per checkpoint
                if FLAGS.saveCheckpoint:
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir, "model")
                    s = time.time()
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=i_checkpoint,
                                     write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)

                # save best model and track patience for early stopping
                if dev_ppx < low_ppx:
                    patience = FLAGS.patience
                    low_ppx = dev_ppx
                    low_ppx_step = current_step
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir, "best")
                    s = time.time()
                    model.best_saver.save(sess,
                                          checkpoint_path,
                                          global_step=0,
                                          write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)
                else:
                    patience -= 1

                if patience <= 0:
                    mylog("Training finished. Running out of patience.")
                    break

                # Zero the timer and loss counters for the next checkpoint window.
                step_time, loss, n_valid_sents, n_valid_words = 0.0, 0.0, 0, 0
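
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the checkpoint block
# above converts accumulated cross-entropy into perplexity as
# ppx = exp(total_loss / n_valid_words), treating any per-token loss >= 300 as
# infinite perplexity. A standalone version of that arithmetic; the helper
# name and the numbers are hypothetical. Assumes the module-level
# `import math` this file already relies on.
def _perplexity_sketch(total_loss, n_valid_words):
    """Average per-token loss -> perplexity, with the same >= 300 guard used
    at checkpoints."""
    avg = total_loss / n_valid_words
    return math.exp(avg) if avg < 300 else float("inf")

# e.g. 52000.0 total nats over 12000 target tokens:
#   avg = 52000.0 / 12000 = 4.33  ->  ppx = exp(4.33) ~ 76.2
# ---------------------------------------------------------------------------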