import csv
import os
import time
from os import listdir
from os.path import isfile, join

import numpy as np
import sugartensor as tf  # sugartensor monkey-patches TensorFlow with the tf.sg_* helpers used below
from tqdm import tqdm


# NOTE: `func`, `_learning_rate`, and `sg_init` are free names in this snippet.
# In sugartensor, `wrapper` is the inner function returned by the `sg_train_func`
# decorator, which binds all three. This first variant targets the pre-TF-1.0
# summary API (tf.train.SummaryWriter etc.); a TF-1.x variant follows further below.
def wrapper(**kwargs):
    opt = tf.sg_opt(kwargs)

    # default training options
    opt += tf.sg_opt(lr=0.001,
                     save_dir='asset/train',
                     max_ep=1000, ep_size=100000,
                     save_interval=600, log_interval=60,
                     early_stop=True, lr_reset=False,
                     eval_metric=[],
                     max_keep=5, keep_interval=1,
                     tqdm=True, console_log=False)

    # make directories if they do not exist
    if not os.path.exists(opt.save_dir + '/log'):
        os.makedirs(opt.save_dir + '/log')
    if not os.path.exists(opt.save_dir + '/ckpt'):
        os.makedirs(opt.save_dir + '/ckpt')

    # find last checkpoint
    last_file = tf.train.latest_checkpoint(opt.save_dir + '/ckpt')
    if last_file:
        ep = start_ep = int(last_file.split('-')[1]) + 1
        start_step = int(last_file.split('-')[2])
    else:
        ep = start_ep = 1
        start_step = 0

    # checkpoint saver
    saver = tf.train.Saver(max_to_keep=opt.max_keep,
                           keep_checkpoint_every_n_hours=opt.keep_interval)

    # summary writer
    summary_writer = tf.train.SummaryWriter(opt.save_dir + '/log',
                                            graph=tf.get_default_graph())

    # add learning rate summary
    with tf.name_scope('summary'):
        tf.scalar_summary('60. learning_rate/learning_rate', _learning_rate)

    # add evaluation metric summaries
    for m in opt.eval_metric:
        tf.sg_summary_metric(m)

    # summary op
    summary_op = tf.merge_all_summaries()

    # create session
    if opt.sess:
        sess = opt.sess
    else:
        # session with multiple GPU support
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # initialize variables
    sg_init(sess)

    # restore last checkpoint
    if last_file:
        saver.restore(sess, last_file)

    # set learning rate
    if start_ep == 1 or opt.lr_reset:
        sess.run(_learning_rate.assign(opt.lr))

    # logging
    tf.sg_info('Training started from epoch[%03d]-step[%d].' % (start_ep, start_step))

    try:
        # start data queue runner
        with tf.sg_queue_context(sess):

            # set session mode to train
            tf.sg_set_train(sess)

            # loss history for learning rate decay
            loss, loss_prev, early_stopped = None, None, False

            # time stamps for saving and logging
            last_saved = last_logged = time.time()

            # epoch loop
            for ep in range(start_ep, opt.max_ep + 1):

                # show progress bar
                if opt.tqdm:
                    iterator = tqdm(range(opt.ep_size), desc='train',
                                    ncols=70, unit='b', leave=False)
                else:
                    iterator = range(opt.ep_size)

                # batch loop
                for _ in iterator:

                    # call train function
                    batch_loss = func(sess, opt)

                    # update exponential moving average of the loss
                    if batch_loss is not None:
                        if loss is None:
                            loss = np.mean(batch_loss)
                        else:
                            loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # saving
                    if time.time() - last_saved > opt.save_interval:
                        last_saved = time.time()
                        saver.save(sess, opt.save_dir + '/ckpt/model-%03d' % ep,
                                   write_meta_graph=False,
                                   global_step=sess.run(tf.sg_global_step()))

                    # logging
                    if time.time() - last_logged > opt.log_interval:
                        last_logged = time.time()

                        # set session mode to infer
                        tf.sg_set_infer(sess)

                        # run evaluation op
                        if len(opt.eval_metric) > 0:
                            sess.run(opt.eval_metric)

                        if opt.console_log:  # console logging
                            # log epoch information
                            tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                                       % (ep, sess.run(_learning_rate),
                                          sess.run(tf.sg_global_step()),
                                          ('NA' if loss is None else '%8.6f' % loss)))
                        else:  # tensorboard logging
                            # run logging op
                            summary_writer.add_summary(
                                sess.run(summary_op),
                                global_step=sess.run(tf.sg_global_step()))

                        # learning rate decay
                        if opt.early_stop and loss_prev:
                            # loss is stalling if it failed to drop by at least 5%
                            if loss >= 0.95 * loss_prev:
                                current_lr = sess.run(_learning_rate)
                                if current_lr < 5e-6:
                                    # early stopping
                                    early_stopped = True
                                    break
                                else:
                                    # decrease learning rate by half
                                    sess.run(_learning_rate.assign(current_lr / 2.))

                        # update loss history
                        loss_prev = loss

                        # revert session mode to train
                        tf.sg_set_train(sess)

                # log epoch information
                if not opt.console_log:
                    tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                               % (ep, sess.run(_learning_rate),
                                  sess.run(tf.sg_global_step()),
                                  ('NA' if loss is None else '%8.6f' % loss)))

                if early_stopped:
                    tf.sg_info('\tEarly stopped ( no loss progress ).')
                    break
    finally:
        # save last epoch
        saver.save(sess, opt.save_dir + '/ckpt/model-%03d' % ep,
                   write_meta_graph=False,
                   global_step=sess.run(tf.sg_global_step()))

        # set session mode to infer
        tf.sg_set_infer(sess)

        # logging
        tf.sg_info('Training finished at epoch[%d]-step[%d].'
                   % (ep, sess.run(tf.sg_global_step())))

        # close session only if we created it
        if opt.sess is None:
            sess.close()
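# Usage sketch (an assumption, not part of the snippet above): in sugartensor the
# wrapper is obtained by decorating a per-batch train function with
# `tf.sg_train_func`, and the keyword arguments of the call become the `opt`
# defaults listed above. `loss_tensor` and `train_op` are hypothetical names for
# your own graph nodes.
#
#     import sugartensor as tf
#
#     @tf.sg_train_func
#     def train_step(sess, opt):
#         # run one batch; the returned scalar feeds the loss moving average
#         return sess.run([loss_tensor, train_op])[0]
#
#     train_step(lr=0.001, max_ep=10, ep_size=1000, save_dir='asset/train')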
# NOTE: this inference script assumes module-level definitions that are not part
# of the snippet: `num_epochs`, the hyper-parameter container `Hp` (where
# `Hp.keep_prob` is a dropout placeholder), `BasicLSTMCell2`, `read_and_decode`,
# `tower_infer_enc`, `tower_infer_dec`, `word2char_ids`, and `idxword2txt`.
def generate():
    dev = '/cpu:0'
    with tf.device(dev):
        mydir = 'tfrc150char_wrd0704'
        files = [f for f in listdir(mydir) if isfile(join(mydir, f))]  # only used by the commented alternative below
        tfrecords_filename = [join(mydir, 'short_infer3.tfrecords')]  # [join(mydir, f) for f in files]
        tfrecords_filename_inf = [join(mydir, '11_3.tfrecords')]

        print(tfrecords_filename)
        filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                        num_epochs=num_epochs,
                                                        shuffle=True, capacity=1)
        infer_queue = tf.train.string_input_producer(tfrecords_filename_inf,
                                                     num_epochs=num_epochs,
                                                     shuffle=True, capacity=1)

        # leftovers from the training script; unused on this inference path
        optim = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.99)
        tower_grads = []
        reuse_vars = False

        with tf.variable_scope("dec_lstm") as scp:
            dec_cell = BasicLSTMCell2(Hp.w_emb_size, Hp.rnn_hd, state_is_tuple=True)

        with tf.variable_scope("contx_lstm") as scp:
            cell = BasicLSTMCell2(Hp.hd, Hp.rnn_hd, state_is_tuple=True)
            rnn_cell = tf.contrib.rnn.DropoutWrapper(cell,
                                                     input_keep_prob=Hp.keep_prob,
                                                     output_keep_prob=Hp.keep_prob)

        (words, chars) = read_and_decode(filename_queue, Hp.batch_size * Hp.num_gpus)

        words_splits = tf.split(axis=0, num_or_size_splits=Hp.num_gpus, value=words)
        chars_splits = tf.split(axis=0, num_or_size_splits=Hp.num_gpus, value=chars)

        word_emb = np.loadtxt("glove300d_0704.txt")
        Hp.word_vs = word_emb.shape[0]

        # ----------------------------------------------------------------------
        # build the encoder once, plus a feed-able single-step decoder
        with tf.name_scope('%s_%d' % ("tower", 0)) as scope:
            rnn_state = tower_infer_enc(chars_splits[0], scope, rnn_cell, dec_cell,
                                        word_emb, out_reuse_vars=False, dev='/cpu:0')

            chars_pl = tf.placeholder(tf.int32, shape=(None, Hp.c_maxlen))
            rnn_state_pl1 = [tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd)),
                             tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd))]
            rnn_state_pl = tf.contrib.rnn.LSTMStateTuple(rnn_state_pl1[0],
                                                         rnn_state_pl1[1])

            final_ids, rnn_state_dec = tower_infer_dec(chars_pl, scope, rnn_cell,
                                                       dec_cell, word_emb, rnn_state_pl,
                                                       out_reuse_vars=False, dev='/cpu:0')

        # ----------------------------------------------------------------------
        saver = tf.train.Saver(tf.trainable_variables())
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        session_config.gpu_options.per_process_gpu_memory_fraction = 0.94
        session_config.gpu_options.allow_growth = False

        restore_dir = 'tnsrbrd/hin17d08m_1313g2'  # lec30d07m_1634g2 lec04d07m_2006g2 lec28d07m_1221g2 lec31d07m_1548g2
        csv_file = join(restore_dir, time.strftime("hin%dd%mm_%H%M.csv"))
        csv_f = open(csv_file, 'a')
        csv_writer = csv.writer(csv_f)

        with tf.Session(config=session_config) as sess:
            sess.run(tf.group(tf.global_variables_initializer(),
                              tf.local_variables_initializer()))
            saver.restore(sess,
                          tf.train.latest_checkpoint(join(restore_dir, 'last_chpt')))  # lec04d07m_2006g2

            # start input queue runners under a single coordinator
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            for ep in range(num_epochs):
                tf.sg_set_infer(sess)

                # run the encoder on one batch
                rnn_state_val, w_txt, ch_txt = sess.run(
                    [rnn_state, words_splits[0], chars_splits[0]],
                    feed_dict={Hp.keep_prob: 1.0})

                # decode three sentences, feeding each step's output back in
                predictions = []  # [w_txt[:,2,:]]
                for idx in range(3):
                    char_inpt = word2char_ids(ids_val) if idx != 0 else ch_txt[:, 2, :]
                    ids_val, rnn_state_val = sess.run(
                        [final_ids, rnn_state_dec],
                        feed_dict={Hp.keep_prob: 1.0,
                                   rnn_state_pl1[0]: rnn_state_val[0],
                                   rnn_state_pl1[1]: rnn_state_val[1],
                                   chars_pl: char_inpt})

                    # zero out everything after the first stop token (id 2)
                    temp = np.zeros((Hp.batch_size, Hp.w_maxlen))
                    for b in range(Hp.batch_size):
                        stop_ind = np.where(ids_val[b] == 2)[0]
                        if stop_ind.size > 0:
                            ids_val[b, stop_ind[0] + 1:] = 0
                    temp[:, :ids_val.shape[1]] = ids_val
                    predictions.append(temp)

                # predictions: decode_sent x batch x w_maxlen
                predictions = np.array(predictions)

                in_batches = [w_txt[b, :, :] for b in range(Hp.batch_size)]
                res_batches = [predictions[:, b, :] for b in range(Hp.batch_size)]

                for b in range(Hp.batch_size):
                    in_paragraph = idxword2txt(in_batches[b])
                    print("\n INPUT SAMPLE \n")
                    print(in_paragraph)

                    res_paragraph = idxword2txt(res_batches[b])
                    print("\n RESULTS \n")
                    print(res_paragraph)

                    csv_writer.writerow([" ".join(in_paragraph[:3]),
                                         " ".join(in_paragraph[3:]),
                                         " ".join(res_paragraph)])

            csv_f.close()
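# The stop-token masking inside generate() zeroes every id after the first
# occurrence of id 2 in a row. A tiny self-contained numpy illustration of
# exactly that step (`demo_ids` is made-up data, not from the original code):

import numpy as np

demo_ids = np.array([[5, 9, 2, 7, 4],    # stop token (2) mid-sequence
                     [3, 6, 8, 1, 5]])   # no stop token
for b in range(demo_ids.shape[0]):
    stop_ind = np.where(demo_ids[b] == 2)[0]
    if stop_ind.size > 0:
        demo_ids[b, stop_ind[0] + 1:] = 0
print(demo_ids)  # -> [[5 9 2 0 0]
                 #     [3 6 8 1 5]]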
# TF-1.x variant of the wrapper above: a flat save_dir and the tf.summary.* API.
def wrapper(**kwargs):
    r""" Manages arguments of `tf.sg_opt`.

    Args:
      **kwargs:
        lr: A Python scalar (optional). Learning rate. Default is .001.
        eval_metric: A list of tensors containing the values to evaluate. Default is [].
        early_stop: Boolean. If True (default), training stops when the following
          two conditions are met:
          i. The current smoothed loss fails to drop below .95 * the previous loss.
          ii. The current learning rate is less than 5e-6.
        lr_reset: Boolean. If True, the learning rate is set to opt.lr when
          training restarts. Otherwise (default), the value of the stored
          `_learning_rate` is taken.
        save_dir: A string. The root path to which checkpoint and log files are
          saved. Default is `asset/train`.
        max_ep: A positive integer. Maximum number of epochs. Default is 1000.
        ep_size: A positive integer. Number of total batches in an epoch.
          For proper display of the log. Default is 1e5.
        save_interval: A Python scalar. The interval of saving checkpoint files.
          By default, a checkpoint file is written every 600 seconds.
        log_interval: A Python scalar. The interval of recording logs.
          By default, logging is executed every 60 seconds.
        max_keep: A positive integer. Maximum number of recent checkpoints to keep.
          Default is 5.
        keep_interval: A Python scalar. How often to keep checkpoints.
          Default is 1 hour.
        tqdm: Boolean. If True (default), progress bars are shown.
        console_log: Boolean. If True, a series of losses is shown on the console
          instead of tensorboard. Default is False.
    """
    opt = tf.sg_opt(kwargs)

    # default training options
    opt += tf.sg_opt(lr=0.001,
                     save_dir='asset/train',
                     max_ep=1000, ep_size=100000,
                     save_interval=600, log_interval=60,
                     early_stop=True, lr_reset=False,
                     eval_metric=[],
                     max_keep=5, keep_interval=1,
                     tqdm=True, console_log=False)

    # make directory if it does not exist
    if not os.path.exists(opt.save_dir):
        os.makedirs(opt.save_dir)

    # find last checkpoint
    last_file = tf.train.latest_checkpoint(opt.save_dir)
    if last_file:
        ep = start_ep = int(last_file.split('-')[1]) + 1
        start_step = int(last_file.split('-')[2])
    else:
        ep = start_ep = 1
        start_step = 0

    # checkpoint saver
    saver = tf.train.Saver(max_to_keep=opt.max_keep,
                           keep_checkpoint_every_n_hours=opt.keep_interval)

    # summary writer
    summary_writer = tf.summary.FileWriter(opt.save_dir, graph=tf.get_default_graph())

    # add learning rate summary
    tf.summary.scalar('learning_r', _learning_rate)

    # add evaluation metric summaries
    for m in opt.eval_metric:
        tf.sg_summary_metric(m)

    # summary op
    summary_op = tf.summary.merge_all()

    # create session
    if opt.sess:
        sess = opt.sess
    else:
        # session with multiple GPU support
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # initialize variables
    sg_init(sess)

    # restore last checkpoint
    if last_file:
        saver.restore(sess, last_file)

    # set learning rate
    if start_ep == 1 or opt.lr_reset:
        sess.run(_learning_rate.assign(opt.lr))

    # logging
    tf.sg_info('Training started from epoch[%03d]-step[%d].' % (start_ep, start_step))

    try:
        # start data queue runner
        with tf.sg_queue_context(sess):

            # set session mode to train
            tf.sg_set_train(sess)

            # loss history for learning rate decay
            loss, loss_prev, early_stopped = None, None, False

            # time stamps for saving and logging
            last_saved = last_logged = time.time()

            # epoch loop
            for ep in range(start_ep, opt.max_ep + 1):

                # show progress bar
                if opt.tqdm:
                    iterator = tqdm(range(opt.ep_size), desc='train',
                                    ncols=70, unit='b', leave=False)
                else:
                    iterator = range(opt.ep_size)

                # batch loop
                for _ in iterator:

                    # call train function
                    batch_loss = func(sess, opt)

                    # update exponential moving average of the loss
                    if batch_loss is not None:
                        if loss is None:
                            loss = np.mean(batch_loss)
                        else:
                            loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # saving
                    if time.time() - last_saved > opt.save_interval:
                        last_saved = time.time()
                        saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                                   write_meta_graph=False,
                                   global_step=sess.run(tf.sg_global_step()))

                    # logging
                    if time.time() - last_logged > opt.log_interval:
                        last_logged = time.time()

                        # set session mode to infer
                        tf.sg_set_infer(sess)

                        # run evaluation op
                        if len(opt.eval_metric) > 0:
                            sess.run(opt.eval_metric)

                        if opt.console_log:  # console logging
                            # log epoch information
                            tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                                       % (ep, sess.run(_learning_rate),
                                          sess.run(tf.sg_global_step()),
                                          ('NA' if loss is None else '%8.6f' % loss)))
                        else:  # tensorboard logging
                            # run logging op
                            summary_writer.add_summary(
                                sess.run(summary_op),
                                global_step=sess.run(tf.sg_global_step()))

                        # learning rate decay
                        if opt.early_stop and loss_prev:
                            # loss is stalling if it failed to drop by at least 5%
                            if loss >= 0.95 * loss_prev:
                                current_lr = sess.run(_learning_rate)
                                if current_lr < 5e-6:
                                    # early stopping
                                    early_stopped = True
                                    break
                                else:
                                    # decrease learning rate by half
                                    sess.run(_learning_rate.assign(current_lr / 2.))

                        # update loss history
                        loss_prev = loss

                        # revert session mode to train
                        tf.sg_set_train(sess)

                # log epoch information
                if not opt.console_log:
                    tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                               % (ep, sess.run(_learning_rate),
                                  sess.run(tf.sg_global_step()),
                                  ('NA' if loss is None else '%8.6f' % loss)))

                if early_stopped:
                    tf.sg_info('\tEarly stopped ( no loss progress ).')
                    break
    finally:
        # save last epoch
        saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                   write_meta_graph=False,
                   global_step=sess.run(tf.sg_global_step()))

        # set session mode to infer
        tf.sg_set_infer(sess)

        # logging
        tf.sg_info('Training finished at epoch[%d]-step[%d].'
                   % (ep, sess.run(tf.sg_global_step())))

        # close session only if we created it
        if opt.sess is None:
            sess.close()
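# The decay rule shared by both wrapper variants halves the learning rate
# whenever the smoothed loss fails to improve by at least 5% between log
# intervals, and stops once the rate has fallen below 5e-6. A minimal
# pure-Python sketch of that schedule (illustrative only; `losses` is made-up
# data, not from the original code):

def decay_schedule(losses, lr=0.001, min_lr=5e-6):
    loss_prev, early_stopped = None, False
    for loss in losses:
        if loss_prev is not None and loss >= 0.95 * loss_prev:
            if lr < min_lr:
                early_stopped = True  # early stopping
                break
            lr /= 2.                  # decrease learning rate by half
        loss_prev = loss
    return lr, early_stopped

print(decay_schedule([1.0, 0.99, 0.98, 0.5]))  # -> (0.00025, False)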