os.system("cp %s %s" % (train_norm_dir, test_norm_dir)) if mode == 0: set_path = ps.PathSetting(prj_dir, 'ACAM', save_dir) logs_dir = set_path.logs_dir os.system("rm -rf " + logs_dir + '/train') os.system("rm -rf " + logs_dir + '/valid') os.system("mkdir " + logs_dir + '/train') os.system("mkdir " + logs_dir + '/valid') Vp.main(save_dir, prj_dir, 'ACAM', 'train', dev='/gpu:' + gpu_no) gs.freeze_graph(prj_dir + '/logs/ACAM', prj_dir + '/saved_model/graph/ACAM', 'model_1/logits,model_1/raw_labels') if mode == 1: set_path = ps.PathSetting(prj_dir, 'bDNN', save_dir) logs_dir = set_path.logs_dir os.system("rm -rf " + logs_dir + '/train') os.system("rm -rf " + logs_dir + '/valid') os.system("mkdir " + logs_dir + '/train') os.system("mkdir " + logs_dir + '/valid') Vb.main(save_dir, prj_dir, 'bDNN', 'train', dev='/gpu:' + gpu_no) gs.freeze_graph(prj_dir + '/logs/bDNN', prj_dir + '/saved_model/graph/bDNN',
def main(save_dir, prj_dir=None, model=None, mode=None, dev="/gpu:2"):

    # Configuration Part #
    # os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    device = dev
    os.environ["CUDA_VISIBLE_DEVICES"] = device[-1]

    if mode == 'train':  # note: string comparison with '==', not 'is'
        import path_setting as ps

        set_path = ps.PathSetting(prj_dir, model, save_dir)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir

        sys.path.insert(0, prj_dir + '/configure/LSTM')
        import config as cg

        global seq_size, batch_num
        seq_size = cg.seq_len
        batch_num = cg.num_batches

        global learning_rate, dropout_rate, max_epoch, batch_size, valid_batch_size
        learning_rate = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = batch_num * seq_size

        global target_delay
        target_delay = cg.target_delay

        global lstm_cell_size, num_layers
        lstm_cell_size = cg.cell_size
        num_layers = cg.num_layers

    # Graph Part #
    print("Graph initialization...")
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(is_training=True)
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(is_training=False)
    print("Done")

    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    print("Done")

    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    print("Done")

    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)

    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            saver.restore(sess, initial_logs_dir + ckpt_name)
            # saver.restore(sess, logs_dir + ckpt_name)
            saver.save(sess, logs_dir + "/model_LSTM.ckpt", 0)  # model save
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization

    if mode == 'train':
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir,
                                       target_delay=target_delay, u=u,
                                       name="train")  # training data reader initialization

        file_len = train_data_set.get_file_len()
        MAX_STEP = max_epoch * file_len
        print(get_num_params())

        for itr in range(MAX_STEP):
            train_inputs, train_labels = train_data_set.next_batch(seq_size)

            one_hot_labels = train_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)

            feed_dict = {m_train.inputs: train_inputs,
                         m_train.labels: one_hot_labels,
                         m_train.keep_probability: dropout_rate}

            sess.run(m_train.train_op, feed_dict=feed_dict)

            if itr % 100 == 0 and itr >= 0:
                train_cost, train_accuracy, train_rate, train_pre = sess.run(
                    [m_train.cost, m_train.accuracy, m_train.lr, m_train.pred],
                    feed_dict=feed_dict)
                # print(logits)
                print("Step: %d, train_cost: %.4f, train_accuracy=%4.4f lr=%.8f"
                      % (itr, train_cost, train_accuracy * 100, train_rate))

                train_cost_summary_str = sess.run(cost_summary_op,
                                                  feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op,
                                                      feed_dict={summary_ph: train_accuracy})
                train_summary_writer.add_summary(train_cost_summary_str, itr)  # write the train phase summary to event files
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)

            if itr % file_len == 0 and itr > 0:
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir, type='LSTM')

                print("valid_cost: %.4f, valid_accuracy=%4.4f"
                      % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op,
                                                  feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op,
                                                      feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)  # write the valid phase summary to event files
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)

        gs.freeze_graph(prj_dir + '/logs/LSTM',
                        prj_dir + '/saved_model/graph/LSTM',
                        'model_1/soft_pred,model_1/raw_labels')

    elif mode == 'test':
        final_softout, final_label = utils.vad_test3(m_valid, sess, valid_batch_size,
                                                     test_file_dir, norm_dir, data_len, eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
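# The training loop above calls dense_to_one_hot() and get_num_params(), which
# are defined elsewhere in the repository. The sketches below are assumptions
# about what they likely do (modeled on the classic TensorFlow MNIST helper
# and a standard parameter-counting utility), not the repository's exact code.

import numpy as np
import tensorflow as tf


def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert an (N, 1) array of integer class indices to (N, num_classes) one-hot rows."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel().astype(int)] = 1
    return labels_one_hot


def get_num_params():
    """Count trainable parameters in the current default graph."""
    return int(sum(np.prod(v.get_shape().as_list())
                   for v in tf.trainable_variables()))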
os.popen('rm -rf ' + './data/valid/clean/*.npy')
os.popen('rm -rf ' + './data/valid/clean/*.bin')

# model train
if not test_only:
    tr.main([prj_dir, logs_dir])

# save graph
gs.freeze_graph(logs_dir, save_dir, 'model_1/pred,model_1/labels,model_1/cost')
print("Training finished!")

test.test()
import graph_save as gs

prj_dir = '/home/sbie/storage3/github/VAD_Toolkit/VAD'

gs.freeze_graph(prj_dir + '/saved_model/temp/temp_LSTM',
                prj_dir + '/saved_model/graph/LSTM',
                'model_1/soft_pred,model_1/raw_labels')

# gs.freeze_graph(prj_dir + '/saved_model/temp_ACAM',
#                 prj_dir + '/saved_model/ACAM',
#                 'model_1/logits,model_1/raw_labels')
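# graph_save.freeze_graph itself is not shown in this section. In TensorFlow
# 1.x, freezing typically means restoring the latest checkpoint and folding
# variables into graph constants; a minimal sketch under that assumption is
# below (the function name and output filename are illustrative, not the
# repository's actual implementation).

import tensorflow as tf


def freeze_graph_sketch(logs_dir, out_dir, output_node_names):
    # Find the most recent checkpoint written by the training loop.
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta')
    with tf.Session() as sess:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Replace variables with constants so the graph is self-contained.
        frozen = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, output_node_names.split(','))
    tf.train.write_graph(frozen, out_dir, 'frozen_model.pb', as_text=False)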
def main(save_dir, prj_dir=None, model=None, mode=None, dev="/gpu:2"):

    # Configuration Part #
    # os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    device = dev
    os.environ["CUDA_VISIBLE_DEVICES"] = device[-1]

    if mode == 'train':  # note: string comparison with '==', not 'is'
        import path_setting as ps

        set_path = ps.PathSetting(prj_dir, model, save_dir)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir

        sys.path.insert(0, prj_dir + '/configure/ACAM')
        import config as cg

        global initLr, dropout_rate, max_epoch, batch_size, valid_batch_size
        initLr = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = cg.batch_size

        global w, u
        w = cg.w
        u = cg.u

        global bdnn_winlen, bdnn_inputsize, bdnn_outputsize
        bdnn_winlen = (((w - 1) / u) * 2) + 3
        bdnn_inputsize = int(bdnn_winlen * num_features)
        bdnn_outputsize = int(bdnn_winlen)

        global glimpse_hidden, bp_hidden, glimpse_out, bp_out, nGlimpses, \
            lstm_cell_size, action_hidden_1, action_hidden_2
        glimpse_hidden = cg.glimpse_hidden
        bp_hidden = cg.bp_hidden
        glimpse_out = bp_out = cg.glimpse_out
        nGlimpses = cg.nGlimpse  # 7
        lstm_cell_size = cg.lstm_cell_size
        action_hidden_1 = cg.action_hidden_1  # default: 256
        action_hidden_2 = cg.action_hidden_2  # default: 256

    # Graph Part #
    mean_acc_list = []
    var_acc_list = []
    print('Mode : ' + mode)
    print("Graph initialization...")
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(batch_size=batch_size, reuse=None, is_training=True)  # m_train(batch_size)
    with tf.device(device):
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(batch_size=valid_batch_size, reuse=True, is_training=False)
    print("Done")

    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)

    # train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', max_queue=4)
    # valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=4)
    # summary_dic = summary_generation(valid_file_dir)
    print("Done")

    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(initial_logs_dir)
    print("Done")

    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)

    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        print(initial_logs_dir + ckpt_name)
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            saver.restore(sess, initial_logs_dir + ckpt_name)
            saver.save(sess, initial_logs_dir + "/model_ACAM.ckpt", 0)  # model save
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization

    if mode == 'train':
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, w=w, u=u,
                                       name="train")  # training data reader initialization

        file_len = train_data_set.get_file_len()
        MAX_STEP = max_epoch * file_len
        print(get_num_params())

        for itr in range(MAX_STEP):
            start_time = time.time()
            train_inputs, train_labels = train_data_set.next_batch(batch_size)

            feed_dict = {m_train.inputs: train_inputs,
                         m_train.labels: train_labels,
                         m_train.keep_probability: dropout_rate}

            sess.run(m_train.train_op, feed_dict=feed_dict)

            if itr % 100 == 0 and itr >= 0:
                train_cost, train_reward, train_avg_b, train_rminusb, train_p_bps, train_lr, train_res = \
                    sess.run([m_train.cost, m_train.reward, m_train.avg_b, m_train.rminusb,
                              m_train.p_bps, m_train.print_lr, m_train.result],
                             feed_dict=feed_dict)
                duration = time.time() - start_time
                print("Step: %d, cost: %.4f, accuracy: %4.4f, b: %4.4f, R-b: %4.4f, "
                      "p_bps: %4.4f, lr: %7.6f (%.3f sec)"
                      % (itr, train_cost, train_reward, train_avg_b, train_rminusb,
                         train_p_bps, train_lr, duration))
                # np.save('pre/' + train_data_set.get_cur_file_name().split('/')[-1], train_res)

                train_cost_summary_str = sess.run(cost_summary_op,
                                                  feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op,
                                                      feed_dict={summary_ph: train_reward})
                train_summary_writer.add_summary(train_cost_summary_str, itr)
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)

            if itr % file_len == 0 and itr > 0:
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir, type='ACAM')

                print("valid_cost: %.4f, valid_accuracy=%4.4f"
                      % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op,
                                                  feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op,
                                                      feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)  # write the valid phase summary to event files
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)

        gs.freeze_graph(prj_dir + '/logs/ACAM',
                        prj_dir + '/saved_model/graph/ACAM',
                        'model_1/logits,model_1/raw_labels')

    elif mode == 'test':
        final_softout, final_label = utils.vad_test(m_valid, sess, valid_batch_size,
                                                    test_file_dir, norm_dir, data_len, eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
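# Hypothetical driver for the ACAM main() above. The paths and GPU index are
# illustrative only, and test mode additionally relies on module-level globals
# (test_file_dir, data_len, eval_type) that are configured elsewhere in the
# repository.
if __name__ == '__main__':
    # Train the ACAM model; logs, checkpoints, and the frozen graph are
    # written under the project directory as in the script above.
    main('/tmp/acam_save', prj_dir='/path/to/VAD', model='ACAM',
         mode='train', dev='/gpu:0')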