def train():
    # Read Data
    mylog_section("READ DATA")

    from_train = None
    to_train = None
    from_dev = None
    to_dev = None

    from_train, to_train, from_dev, to_dev, _, _ = data_utils.prepare_data(
        FLAGS.data_cache_dir,
        FLAGS.train_path_from,
        FLAGS.train_path_to,
        FLAGS.dev_path_from,
        FLAGS.dev_path_to,
        FLAGS.from_vocab_size,
        FLAGS.to_vocab_size)

    train_data_bucket = read_data(from_train, to_train)
    dev_data_bucket = read_data(from_dev, to_dev)
    _, _, real_vocab_size_from, real_vocab_size_to = data_utils.get_vocab_info(
        FLAGS.data_cache_dir)

    FLAGS._buckets = _buckets
    FLAGS.real_vocab_size_from = real_vocab_size_from
    FLAGS.real_vocab_size_to = real_vocab_size_to

    # train_n_tokens = total training target size
    train_n_tokens = np.sum(
        [np.sum([len(items[1]) for items in x]) for x in train_data_bucket])

    train_bucket_sizes = [
        len(train_data_bucket[b]) for b in xrange(len(_buckets))
    ]
    train_total_size = float(sum(train_bucket_sizes))
    train_buckets_scale = [
        sum(train_bucket_sizes[:i + 1]) / train_total_size
        for i in xrange(len(train_bucket_sizes))
    ]
    dev_bucket_sizes = [len(dev_data_bucket[b]) for b in xrange(len(_buckets))]
    dev_total_size = int(sum(dev_bucket_sizes))

    mylog_section("REPORT")

    # steps
    batch_size = FLAGS.batch_size
    n_epoch = FLAGS.n_epoch
    steps_per_epoch = int(train_total_size / batch_size)
    steps_per_dev = int(dev_total_size / batch_size)
    steps_per_checkpoint = int(steps_per_epoch / 2)
    total_steps = steps_per_epoch * n_epoch

    # reports
    mylog("from_vocab_size: {}".format(FLAGS.from_vocab_size))
    mylog("to_vocab_size: {}".format(FLAGS.to_vocab_size))
    mylog("_buckets: {}".format(FLAGS._buckets))
    mylog("Train:")
    mylog("total: {}".format(train_total_size))
    mylog("bucket sizes: {}".format(train_bucket_sizes))
    mylog("Dev:")
    mylog("total: {}".format(dev_total_size))
    mylog("bucket sizes: {}".format(dev_bucket_sizes))
    mylog("Steps_per_epoch: {}".format(steps_per_epoch))
    mylog("Total_steps:{}".format(total_steps))
    mylog("Steps_per_checkpoint: {}".format(steps_per_checkpoint))

    mylog_section("IN TENSORFLOW")

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = FLAGS.allow_growth

    with tf.Session(config=config) as sess:

        # runtime profile
        if FLAGS.profile:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        mylog_section("MODEL/SUMMARY/WRITER")

        mylog("Creating Model.. (this can take a few minutes)")
        model = create_model(sess, run_options, run_metadata)

        if FLAGS.with_summary:
            mylog("Creating ModelSummary")
            modelSummary = ModelSummary()

            mylog("Creating tf.summary.FileWriter")
            summaryWriter = tf.summary.FileWriter(
                os.path.join(FLAGS.summary_dir, "train.summary"), sess.graph)

        mylog_section("All Variables")
        show_all_variables()

        # Data Iterators
        mylog_section("Data Iterators")

        dite = DataIterator(model, train_data_bucket,
                            len(train_buckets_scale), batch_size,
                            train_buckets_scale)

        iteType = 0
        if iteType == 0:
            mylog("Itetype: withRandom")
            ite = dite.next_random()
        elif iteType == 1:
            mylog("Itetype: withSequence")
            ite = dite.next_sequence()

        # statistics during training
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        low_ppx = float("inf")
        low_ppx_step = 0
        steps_per_report = 30
        n_targets_report = 0
        report_time = 0
        n_valid_sents = 0
        n_valid_words = 0
        patience = FLAGS.patience

        mylog_section("TRAIN")

        while current_step < total_steps:

            # start
            start_time = time.time()

            # data and train
            source_inputs, target_inputs, target_outputs, target_weights, bucket_id = ite.__next__()

            L = model.step(sess, source_inputs, target_inputs, target_outputs,
                           target_weights, bucket_id)

            # loss and time
            step_time += (time.time() - start_time) / steps_per_checkpoint

            loss += L
            current_step += 1
            n_valid_sents += np.sum(np.sign(target_weights[0]))
            n_valid_words += np.sum(target_weights)

            # for report
            report_time += (time.time() - start_time)
            n_targets_report += np.sum(target_weights)

            if current_step % steps_per_report == 0:
                sect_name = "STEP {}".format(current_step)
                msg = "StepTime: {:.2f} sec Speed: {:.2f} targets/s Total_targets: {}".format(
                    report_time / steps_per_report,
                    n_targets_report * 1.0 / report_time, train_n_tokens)
                mylog_line(sect_name, msg)

                report_time = 0
                n_targets_report = 0

                # Create the Timeline object, and write it to a json
                if FLAGS.profile:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open('timeline.json', 'w') as f:
                        f.write(ctf)
                    exit()

            if current_step % steps_per_checkpoint == 0:

                i_checkpoint = int(current_step / steps_per_checkpoint)

                # train_ppx
                loss = loss / n_valid_words
                train_ppx = math.exp(float(loss)) if loss < 300 else float("inf")
                learning_rate = model.learning_rate.eval()

                # dev_ppx
                dev_loss, dev_ppx = evaluate(sess, model, dev_data_bucket)

                # report
                sect_name = "CHECKPOINT {} STEP {}".format(i_checkpoint,
                                                           current_step)
                msg = "Learning_rate: {:.4f} Dev_ppx: {:.2f} Train_ppx: {:.2f}".format(
                    learning_rate, dev_ppx, train_ppx)
                mylog_line(sect_name, msg)

                if FLAGS.with_summary:
                    # save summary
                    _summaries = modelSummary.step_record(sess, train_ppx,
                                                          dev_ppx)
                    for _summary in _summaries:
                        summaryWriter.add_summary(_summary, i_checkpoint)

                # save model per checkpoint
                if FLAGS.saveCheckpoint:
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir,
                                                   "model")
                    s = time.time()
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=i_checkpoint,
                                     write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)

                # save best model
                if dev_ppx < low_ppx:
                    patience = FLAGS.patience
                    low_ppx = dev_ppx
                    low_ppx_step = current_step
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir,
                                                   "best")
                    s = time.time()
                    model.best_saver.save(sess,
                                          checkpoint_path,
                                          global_step=0,
                                          write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)
                else:
                    patience -= 1

                if patience <= 0:
                    mylog("Training finished. Running out of patience.")
                    break

                # Save checkpoint and zero timer and loss.
                step_time, loss, n_valid_sents, n_valid_words = 0.0, 0.0, 0, 0
def train():
    # 1. Read the training and dev data
    mylog_section('READ DATA')

    train_data_bucket, dev_data_bucket, _buckets, vocab_path = read_train_dev(
        FLAGS.data_cache_dir, FLAGS.train_path, FLAGS.dev_path,
        FLAGS.vocab_size, FLAGS.L, FLAGS.n_bucket)

    ########## print the required report info: start ##########
    real_vocab_size = get_real_vocab_size(vocab_path)

    FLAGS._buckets = _buckets
    FLAGS.real_vocab_size = real_vocab_size

    # total number of tokens to process
    train_n_tokens = np.sum([
        np.sum([len(sentence) for sentence in bucket])
        for bucket in train_data_bucket
    ])
    # train_data_bucket
    train_bucket_sizes = [
        len(train_data_bucket[index]) for index in xrange(len(_buckets))
    ]
    train_total_size = float(sum(train_bucket_sizes))
    # Cumulative fractions used for bucket selection: the data_iterator draws a
    # random number in [0, 1); train_buckets_scale splits that range into
    # intervals weighted by the number of sentences in each bucket, e.g.
    # [0.1, 0.3, 0.5, 0.8, 1.0]. Whichever interval the random number falls
    # into determines which bucket is chosen.
    train_buckets_scale = [
        sum(train_bucket_sizes[:i + 1]) / train_total_size
        for i in xrange(len(train_bucket_sizes))
    ]
    dev_bucket_sizes = [
        len(dev_data_bucket[index]) for index in xrange(len(_buckets))
    ]
    dev_total_size = int(sum(dev_bucket_sizes))

    mylog_section("REPORT")

    # steps
    batch_size = FLAGS.batch_size
    n_epoch = FLAGS.n_epoch
    steps_per_epoch = int(train_total_size / batch_size)
    steps_per_checkpoint = int(steps_per_epoch / 2)  # validate every half epoch
    total_steps = steps_per_epoch * n_epoch

    # reports
    mylog("real_vocab_size: {}".format(FLAGS.real_vocab_size))
    mylog("_buckets: {}".format(FLAGS._buckets))
    mylog("Train:")
    mylog("total: {}".format(train_total_size))
    mylog("bucket sizes: {}".format(train_bucket_sizes))
    mylog("Dev:")
    mylog("total: {}".format(dev_total_size))
    mylog("bucket sizes: {}".format(dev_bucket_sizes))
    mylog("Steps_per_epoch: {}".format(steps_per_epoch))
    mylog("Total_steps:{}".format(total_steps))
    mylog("Steps_per_checkpoint: {}".format(steps_per_checkpoint))
    ########## print the required report info: end ##########

    mylog_section("IN TENSORFLOW")

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    config.gpu_options.allow_growth = FLAGS.allow_growth

    with tf.Session(config=config) as sess:

        # runtime profile
        if FLAGS.profile:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        mylog_section("MODEL/SUMMARY/WRITER")

        mylog("Creating Model.. (this can take a few minutes)")
        model = create_model(sess, run_options, run_metadata)

        mylog_section("All Variables")
        show_all_variables()

        # Data Iterators
        mylog_section("Data Iterators")

        dite = DataIterator(model, train_data_bucket,
                            len(train_buckets_scale), batch_size,
                            train_buckets_scale)

        iteType = 0
        if iteType == 0:
            mylog("Itetype: withRandom")
            ite = dite.next_random()
        elif iteType == 1:
            mylog("Itetype: withSequence")
            ite = dite.next_sequence()

        # statistics during training
        step_time, loss = 0.0, 0.0
        current_step = 0
        low_ppx = float("inf")
        steps_per_report = 30
        n_targets_report = 0
        report_time = 0
        n_valid_sents = 0
        n_valid_words = 0
        patience = FLAGS.patience

        mylog_section("TRAIN")

        while current_step < total_steps:

            # start
            start_time = time.time()

            # data and train
            inputs, outputs, weights, bucket_id = ite.next()  # training batch

            L = model.step(sess, inputs, outputs, weights, bucket_id)

            # loss and time
            step_time += (time.time() - start_time) / steps_per_checkpoint

            loss += L
            current_step += 1
            # weights (and the other batch tensors) are laid out so that
            # len(weights) == sentence length and len(weights[0]) == batch size
            n_valid_sents += np.sum(np.sign(weights[0]))
            n_valid_words += np.sum(weights)

            # for report
            report_time += (time.time() - start_time)
            n_targets_report += np.sum(weights)

            # progress report
            if current_step % steps_per_report == 0:
                sect_name = "STEP {}".format(current_step)
                msg = "StepTime: {:.2f} sec Speed: {:.2f} targets/s Total_targets: {}".format(
                    report_time / steps_per_report,
                    n_targets_report * 1.0 / report_time, train_n_tokens)
                mylog_line(sect_name, msg)

                report_time = 0
                n_targets_report = 0

                # Create the Timeline object, and write it to a json
                if FLAGS.profile:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open('timeline.json', 'w') as f:
                        f.write(ctf)
                    exit()

            # every half epoch, compute ppx on the dev set
            if current_step % steps_per_checkpoint == 0:

                i_checkpoint = int(current_step / steps_per_checkpoint)

                # train_ppx
                loss = loss / n_valid_words
                train_ppx = math.exp(float(loss)) if loss < 300 else float("inf")
                learning_rate = model.learning_rate.eval()

                # dev_ppx
                dev_loss, dev_ppx = evaluate(sess, model, dev_data_bucket)

                # report
                sect_name = "CHECKPOINT {} STEP {}".format(i_checkpoint,
                                                           current_step)
                msg = "Learning_rate: {:.4f} Dev_ppx: {:.2f} Train_ppx: {:.2f}".format(
                    learning_rate, dev_ppx, train_ppx)
                mylog_line(sect_name, msg)

                # save model per checkpoint
                if FLAGS.saveCheckpoint:
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir,
                                                   "model")
                    s = time.time()
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=i_checkpoint,
                                     write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)

                # save best model
                if dev_ppx < low_ppx:
                    patience = FLAGS.patience
                    low_ppx = dev_ppx
                    checkpoint_path = os.path.join(FLAGS.saved_model_dir,
                                                   "best")
                    s = time.time()
                    model.best_saver.save(sess,
                                          checkpoint_path,
                                          global_step=0,
                                          write_meta_graph=False)
                    msg = "Model saved using {:.2f} sec at {}".format(
                        time.time() - s, checkpoint_path)
                    mylog_line(sect_name, msg)
                else:
                    patience -= 1
                    # whenever dev_ppx >= low_ppx, halve the learning rate
                    sess.run(model.learning_rate_decay_op)
                    msg = 'dev_ppx:{}, low_ppx:{}'.format(str(dev_ppx),
                                                          str(low_ppx))
                    mylog_line(sect_name, msg)
                    msg = 'dev_ppx >= low_ppx, patience = {}, learning_rate = {}'.format(
                        str(patience), str(model.learning_rate.eval()))
                    mylog_line(sect_name, msg)

                if patience <= 0:
                    mylog("Training finished. Running out of patience.")
                    break

                # Save checkpoint and zero timer and loss.
                step_time, loss, n_valid_sents, n_valid_words = 0.0, 0.0, 0, 0
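# The `train_buckets_scale` list built above holds cumulative fractions of the
# training data per bucket (e.g. [0.1, 0.3, 0.5, 0.8, 1.0]). The iterator's
# next_random() presumably draws a uniform number in [0, 1) and picks the first
# bucket whose cumulative fraction exceeds it, so larger buckets are sampled
# proportionally more often. A minimal sketch of that selection rule; the
# helper `pick_bucket` is illustrative, not the actual DataIterator code.
import random


def pick_bucket(train_buckets_scale):
    """Return a bucket_id sampled proportionally to bucket size."""
    r = random.random()  # uniform in [0, 1)
    return min(i for i, scale in enumerate(train_buckets_scale) if scale > r)


# Example with the cumulative fractions mentioned in the comment above:
scale = [0.1, 0.3, 0.5, 0.8, 1.0]
counts = [0] * len(scale)
for _ in range(10000):
    counts[pick_bucket(scale)] += 1
print(counts)  # roughly proportional to [0.1, 0.2, 0.2, 0.3, 0.2]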
def train(raw_data=FLAGS.raw_data):
    # Read Data
    mylog("Reading Data...")
    train_set, dev_set, test_set, embAttr, START_ID, item_population, p_item, _, _, _, _, _ = get_data(
        raw_data, data_dir=FLAGS.data_dir)
    n_targets_train = np.sum(
        [np.sum([len(items) for uid, items in x]) for x in train_set])
    train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
    train_total_size = float(sum(train_bucket_sizes))
    train_buckets_scale = [
        sum(train_bucket_sizes[:i + 1]) / train_total_size
        for i in xrange(len(train_bucket_sizes))
    ]
    dev_bucket_sizes = [len(dev_set[b]) for b in xrange(len(_buckets))]
    dev_total_size = int(sum(dev_bucket_sizes))

    # steps
    batch_size = FLAGS.batch_size
    n_epoch = FLAGS.n_epoch
    steps_per_epoch = int(train_total_size / batch_size)
    steps_per_dev = int(dev_total_size / batch_size)
    steps_per_checkpoint = int(steps_per_epoch / 2)
    total_steps = steps_per_epoch * n_epoch

    # reports
    mylog(_buckets)
    mylog("Train:")
    mylog("total: {}".format(train_total_size))
    mylog("bucket sizes: {}".format(train_bucket_sizes))
    mylog("Dev:")
    mylog("total: {}".format(dev_total_size))
    mylog("bucket sizes: {}".format(dev_bucket_sizes))
    mylog("")
    mylog("Steps_per_epoch: {}".format(steps_per_epoch))
    mylog("Total_steps:{}".format(total_steps))
    mylog("Steps_per_checkpoint: {}".format(steps_per_checkpoint))

    # with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, device_count={'CPU': 8, 'GPU': 1})) as sess:
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:

        # runtime profile
        if FLAGS.profile:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        mylog("Creating Model.. (this can take a few minutes)")
        model = create_model(sess, embAttr, START_ID, run_options,
                             run_metadata)
        show_all_variables()

        # Data Iterators
        dite = DataIterator(model, train_set, len(train_buckets_scale),
                            batch_size, train_buckets_scale)

        iteType = 0
        if iteType == 0:
            mylog("withRandom")
            ite = dite.next_random()
        elif iteType == 1:
            mylog("withSequence")
            ite = dite.next_sequence()

        # statistics during training
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        his = []
        low_ppx = float("inf")
        low_ppx_step = 0
        steps_per_report = 30
        n_targets_report = 0
        report_time = 0
        n_valid_sents = 0
        patience = FLAGS.patience
        item_sampled, item_sampled_id2idx = None, None

        while current_step < total_steps:

            # start
            start_time = time.time()

            # re-sample every once a while
            if FLAGS.loss in ['mw', 'mce'] and current_step % FLAGS.n_resample == 0:
                item_sampled, item_sampled_id2idx = sample_items(
                    item_population, FLAGS.n_sampled, p_item)
            else:
                item_sampled = None

            # data and train
            users, inputs, outputs, weights, bucket_id = ite.next()
            L = model.step(sess, users, inputs, outputs, weights, bucket_id,
                           item_sampled=item_sampled,
                           item_sampled_id2idx=item_sampled_id2idx)

            # loss and time
            step_time += (time.time() - start_time) / steps_per_checkpoint

            loss += L
            current_step += 1
            n_valid_sents += np.sum(np.sign(weights[0]))

            # for report
            report_time += (time.time() - start_time)
            n_targets_report += np.sum(weights)

            if current_step % steps_per_report == 0:
                mylog("--------------------" + "Report" + str(current_step) +
                      "-------------------")
                mylog("StepTime: {} Speed: {} targets / sec in total {} targets".format(
                    report_time / steps_per_report,
                    n_targets_report * 1.0 / report_time, n_targets_train))

                report_time = 0
                n_targets_report = 0

                # Create the Timeline object, and write it to a json
                if FLAGS.profile:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open('timeline.json', 'w') as f:
                        f.write(ctf)
                    exit()

            if current_step % steps_per_checkpoint == 0:
                mylog("--------------------" + "TRAIN" + str(current_step) +
                      "-------------------")
                # Print statistics for the previous epoch.
                loss = loss / n_valid_sents
                perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
                mylog("global step %d learning rate %.4f step-time %.2f perplexity "
                      "%.2f" % (model.global_step.eval(),
                                model.learning_rate.eval(), step_time,
                                perplexity))
                train_ppx = perplexity

                # Save checkpoint and zero timer and loss.
                step_time, loss, n_valid_sents = 0.0, 0.0, 0

                # dev data
                mylog("--------------------" + "DEV" + str(current_step) +
                      "-------------------")
                eval_loss, eval_ppx = evaluate(
                    sess, model, dev_set,
                    item_sampled_id2idx=item_sampled_id2idx)
                mylog("dev: ppx: {}".format(eval_ppx))

                his.append([current_step, train_ppx, eval_ppx])

                if eval_ppx < low_ppx:
                    patience = FLAGS.patience
                    low_ppx = eval_ppx
                    low_ppx_step = current_step
                    checkpoint_path = os.path.join(FLAGS.train_dir,
                                                   "best.ckpt")
                    mylog("Saving best model....")
                    s = time.time()
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=0,
                                     write_meta_graph=False)
                    mylog("Best model saved using {} sec".format(time.time() - s))
                else:
                    patience -= 1

                if patience <= 0:
                    mylog("Training finished. Running out of patience.")
                    break

                sys.stdout.flush()
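# The third variant above re-samples a candidate item set every FLAGS.n_resample
# steps when a sampled loss ('mw' or 'mce') is used, via
# sample_items(item_population, FLAGS.n_sampled, p_item). That function is not
# shown here; the sketch below is only a plausible assumption about what such a
# sampler might do, assuming `item_population` is a list of item ids and
# `p_item` their sampling probabilities (both names come from the code above,
# the implementation does not).
import numpy as np


def sample_items_sketch(item_population, n_sampled, p_item):
    """Draw n_sampled item ids weighted by p_item and map id -> local index."""
    sampled = np.random.choice(item_population, size=n_sampled,
                               replace=False, p=p_item)
    id2idx = {int(item_id): idx for idx, item_id in enumerate(sampled)}
    return sampled, id2idx


# Example usage with a toy population of five items:
population = [10, 11, 12, 13, 14]
probs = [0.4, 0.3, 0.1, 0.1, 0.1]
item_sampled, item_sampled_id2idx = sample_items_sketch(population, 3, probs)
print(item_sampled, item_sampled_id2idx)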