import codecs
import os

# FLAGS plus TextConverter, batch_generator and CharRNN are assumed to come
# from the surrounding project (e.g. its read_utils and model modules).

def main(_):
    model_path = os.path.join('model', FLAGS.name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # codecs.open decodes the file as UTF-8 while reading.
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
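# For reference, a minimal sketch of the batch_generator these scripts rely on.
# This is a hypothetical reconstruction, not the project's actual code: it
# reshapes the encoded text into num_seqs rows and yields (inputs, targets)
# batches in which the targets are the inputs shifted left by one character.
import numpy as np

def batch_generator_sketch(arr, n_seqs, n_steps):
    batch_size = n_seqs * n_steps
    n_batches = len(arr) // batch_size
    arr = arr[:batch_size * n_batches].reshape((n_seqs, -1))
    while True:
        for n in range(0, arr.shape[1], n_steps):
            x = arr[:, n:n + n_steps]
            # Targets: inputs shifted one step left, wrapping the last column.
            y = np.zeros_like(x)
            y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
            yield x, y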
def main(_):
    model_path = os.path.join('model', FLAGS.name)  # directory the model is saved to
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # codecs.open lets us specify the file's encoding; the text is decoded to
    # unicode automatically while reading.
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()  # read the training text
    converter = TextConverter(text, FLAGS.max_vocab)  # build the text converter
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)  # encode the text as an integer array
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # batch generator
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,  # build the model
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g,  # train
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    #print(model_path)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seq, FLAGS.num_step)
    print(converter.vocab_size)
    model = CharModel(converter.vocab_size,
                      num_seq=FLAGS.num_seq,
                      num_step=FLAGS.num_step,
                      lstm_size=FLAGS.lstm_size,
                      num_layers=FLAGS.num_layers,
                      #learning_rate=FLAGS.learning_rate,
                      train_keep_prob=FLAGS.train_keep_prob,
                      #use_embedding=FLAGS.use_embedding,
                      embedding_size=FLAGS.embedding_size,
                      is_Training=True)
    #model.add_placeholder()
    #model.build_lstm()
    #model.build_loss()
    #model.build_optimizer()
    model.train(g, FLAGS.max_steps, model_path)
def main(_):
    model_path = os.path.join('model', FLAGS.name)  # build the save-path string
    if not os.path.exists(model_path):  # create the directory if needed
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()  # read the whole file as one string
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)  # encode the text as an integer sequence
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # 100, 100
    print(converter.vocab_size)
    # Build the model; num_classes is set to the vocabulary size because the
    # task is to predict the next char.
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g,  # train the model
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
def main(_):
    script_path = os.path.abspath(os.path.dirname(__file__))
    model_path = os.path.join(script_path, 'model', FLAGS.name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    print("corpus size " + str(len(text)))
    if os.path.exists(FLAGS.whitelist_file):
        with codecs.open(FLAGS.whitelist_file, encoding='utf-8') as f:
            whitelist = f.read()
        text = remove_non_matching_chars(text, whitelist)
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
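# remove_non_matching_chars is not defined in this snippet. A plausible
# one-liner, assuming the whitelist file simply enumerates allowed characters:
def remove_non_matching_chars_sketch(text, whitelist):
    allowed = set(whitelist)
    # Drop every character of the corpus that is not whitelisted.
    return ''.join(c for c in text if c in allowed)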
def main(_):
    model_path = os.path.join('model', FLAGS.file_type)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # Read and load the corpus for training and validation.
    training_corpus, validating_corpus = read_corpus()

    # Build the text converter.
    print("---------------------------- Initializing Text Converter ----------------------------")
    start_time = time.time()
    converter = TextConverter(training_corpus, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    print('Initialize Text Converter Finished in %.3f Seconds.\n' %
          (time.time() - start_time))

    # Vectorize the content of the corpus.
    vectroize_corpus(converter)

    # Build the char-RNN model.
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)

    # Train the model.
    model.train(FLAGS.max_steps,
                model_path,
                FLAGS.validate_every_n_steps,
                FLAGS.log_every_n_steps)
def test_save_file(self):
    testConverter = TextConverter(text=[
        "We", "are", "accounted", "poor", "citizens,", "the", "patricians",
        "goodare", "accounted", "poor", "citizens,", "the", "patricians",
        "good"
    ], max_vocab=10)
    testConverter.save_to_file('test.pcl')
def initialize_converter(model_path):
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter_path = os.path.join(model_path, 'converter.pkl')
    if os.path.exists(converter_path):
        # Reuse the saved vocabulary so character ids stay stable across runs.
        converter = TextConverter(filename=converter_path)
    else:
        converter = TextConverter(text, FLAGS.max_vocab)
        converter.save_to_file(converter_path)
    arr = converter.text_to_arr(text)
    return arr, converter
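# For reference, a hypothetical reconstruction of the TextConverter interface
# used throughout these scripts (text/max_vocab to build a frequency-ranked
# vocabulary, filename= to restore a saved one); the real class may differ.
import pickle
from collections import Counter

import numpy as np

class TextConverterSketch(object):
    def __init__(self, text=None, max_vocab=5000, filename=None):
        if filename is not None:
            with open(filename, 'rb') as f:
                self.vocab = pickle.load(f)  # restore a saved vocabulary
        else:
            # Keep the max_vocab most frequent characters.
            self.vocab = [c for c, _ in Counter(text).most_common(max_vocab)]
        self.word_to_int_table = {c: i for i, c in enumerate(self.vocab)}

    @property
    def vocab_size(self):
        return len(self.vocab) + 1  # one extra id for out-of-vocabulary chars

    def text_to_arr(self, text):
        unk = len(self.vocab)
        return np.array([self.word_to_int_table.get(c, unk) for c in text])

    def save_to_file(self, filename):
        with open(filename, 'wb') as f:
            pickle.dump(self.vocab, f)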
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    print(model_path)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
        path_exist = False
    else:
        path_exist = True
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model_file_path = tf.train.latest_checkpoint(model_path)
    if path_exist:
        # Resume from the newest checkpoint and recover its global step from
        # checkpoint file names such as 'model-1000.index'.
        model.load(model_file_path)
        indexes = []
        for dirpath, dirnames, filenames in os.walk(model_path):
            for name in filenames:
                filepath = os.path.join(dirpath, name)
                if filepath.endswith(".index"):
                    indexes.append(int(name[6:-6]))
        indexes.sort()
        last_index = indexes[-1]
        model.step = last_index
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
def main(_):
    model_path = os.path.join('models', FLAGS.name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # Excel data. Expected format: first sheet; column 1 id, column 2 query,
    # column 3 response.
    QAs = get_excel_QAs(FLAGS.input_file)
    # # xhj data
    # from read_utils import loadConversations
    # QAs = loadConversations(FLAGS.input_file)

    text = get_QAs_text(QAs)
    if not os.path.exists(os.path.join(model_path, 'converter.pkl')):
        print('Vocabulary file not found, creating it...')
        converter = TextConverter(text, FLAGS.max_vocab)
        converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    else:
        converter = TextConverter(
            filename=os.path.join(model_path, 'converter.pkl'))

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)
    samples = converter.samples_for_train(QA_arrs)
    g = batch_generator(samples, FLAGS.num_seqs)
    print(converter.vocab_size)
    model = DualLSTM(converter.vocab_size,
                     batch_size=FLAGS.num_seqs,
                     num_steps=FLAGS.num_steps,
                     lstm_size=FLAGS.lstm_size,
                     num_layers=FLAGS.num_layers,
                     learning_rate=FLAGS.learning_rate,
                     train_keep_prob=FLAGS.train_keep_prob,
                     use_embedding=FLAGS.use_embedding,
                     embedding_size=FLAGS.embedding_size)
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
def main(_):
    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # Rebuild the vocabulary and the QA cache if either pickle is missing.
    if (not os.path.exists(os.path.join(model_path, 'converter.pkl'))
            or not os.path.exists(os.path.join(model_path, 'QAs.pkl'))):
        print('Vocabulary file not found, creating it...')
        QAs, text = load_origin_data('data/task3_train.txt')
        converter = TextConverter(text, 5000)
        converter.save_to_file(converter.vocab,
                               os.path.join(model_path, 'converter.pkl'))
        converter.save_to_file(QAs, os.path.join(model_path, 'QAs.pkl'))
    else:
        converter = TextConverter(
            filename=os.path.join(model_path, 'converter.pkl'))
        QAs = converter.load_obj(filename=os.path.join(model_path, 'QAs.pkl'))

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)
    thres = int(len(QA_arrs) * 0.9)  # 90/10 train/validation split
    train_samples = QA_arrs[:thres]
    val_samples = QA_arrs[thres:]
    train_g = batch_generator(train_samples, FLAGS.batch_size)
    val_g = val_samples_generator(val_samples)

    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    from model3 import Model
    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=None)

    # Resume training from the latest checkpoint, if one exists.
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g)
def main(_):
    ## Preprocess the data: the TextConverter class from read_utils.py keeps
    ## the most frequent characters and assigns each an index, then
    ## batch_generator turns the index array into a batch generator.
    model_path = os.path.join('model', FLAGS.name)  # join path components
    print("Model save path: ", model_path)
    if not os.path.exists(model_path):
        os.makedirs(model_path)  # create the directory recursively

    # Read text containing non-ASCII characters (e.g. Chinese) via codecs
    # with encoding='utf-8'.
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        print("Training data source:", FLAGS.input_file)
        text = f.read()

    converter = TextConverter(text, FLAGS.max_vocab)
    # Serialize the frequency-filtered vocabulary.
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)  # index of every character
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # batch generator
    print(converter.vocab_size)  # number of distinct characters

    ## With the data ready, build the recurrent network (CharRNN from
    ## model.py) and call its train() method.
    model = CharRNN(converter.vocab_size,        # number of character classes
                    num_seqs=FLAGS.num_seqs,     # sequences per batch
                    num_steps=FLAGS.num_steps,   # characters per sequence
                    lstm_size=FLAGS.lstm_size,   # units per cell
                    num_layers=FLAGS.num_layers, # number of RNN layers
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n)
def train():
    with tf.Session() as sess:
        model_path = os.path.join(FLAGS.train_dir, FLAGS.model_name)
        if not os.path.exists(model_path):
            os.makedirs(model_path)
        checkpoint_path = os.path.join(model_path, "generate.ckpt")

        with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
            text = f.read()  #.replace("\n", "")

        converter_path = os.path.join(model_path, 'converter.pkl')
        if not os.path.exists(converter_path):
            print("construct converter.")
            converter = TextConverter(text, FLAGS.max_vocab_size)
            converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
        else:
            print("load converter")
            converter = TextConverter(None, FLAGS.max_vocab_size, converter_path)
        print("actual vocabulary size is: " + str(converter.vocab_size))

        arr = converter.text_to_arr(text)

        # Sample the sequence length uniformly from train_sentence_length.
        sent_len_p = [1.0 / len(train_sentence_length) for l in train_sentence_length]
        max_time = np.random.choice(train_sentence_length, 1, p=sent_len_p)[0]
        batch_cnt = get_batch_cnt(arr, FLAGS.batch_size, max_time)
        current_step_batch = 0

        # Create the model.
        print("Creating %d layers of %d units for max time %d." %
              (FLAGS.num_layers, FLAGS.lstm_size, max_time))
        model = create_model(sess, converter.vocab_size, False, model_path)
        if FLAGS.set_learning_rate > 0:
            model.set_learning_rate(sess, FLAGS.set_learning_rate)

        loss_per_checkpoint = 0.0
        current_step = 0
        previous_losses = []
        initial_state = sess.run(model.initial_state)

        while True:
            g = batch_generator(arr, FLAGS.batch_size, max_time)
            for inputs, targets in g:
                start_time = time.time()
                batch_loss, final_state = model.train_step(
                    sess, inputs, targets, initial_state)
                step_time = time.time() - start_time
                loss_per_checkpoint += batch_loss / FLAGS.steps_per_checkpoint
                current_step += 1
                current_step_batch += 1

                if current_step % FLAGS.steps_per_log == 0:
                    perplexity = math.exp(
                        float(batch_loss)) if batch_loss < 300 else float("inf")
                    print("global step %d learning rate %.4f step-time %.2f perplexity "
                          "%.2f" % (model.global_step.eval(),
                                    model.learning_rate.eval(), step_time,
                                    perplexity))

                if current_step % FLAGS.steps_per_checkpoint == 0:
                    # Decay the learning rate when the loss stops improving.
                    if len(previous_losses) > 2 and loss_per_checkpoint > max(
                            previous_losses[-3:]) and sess.run(
                                model.learning_rate) >= 0.0002:
                        sess.run(model.learning_rate_decay_op)
                    previous_losses.append(loss_per_checkpoint)
                    loss_per_checkpoint = 0.0
                    model.saver.save(sess, checkpoint_path,
                                     global_step=model.global_step)

                # Carry the LSTM state across batches; reset it once a full
                # pass over the corpus at this max_time completes.
                if current_step_batch % batch_cnt == 0:
                    print("reset initial state")
                    initial_state = sess.run(model.initial_state)
                    current_step_batch = 0
                else:
                    initial_state = final_state

                if current_step % FLAGS.steps_per_sentence_length == 0:
                    # Periodically resample the training sequence length and
                    # restart batching with the new max_time.
                    max_time = np.random.choice(train_sentence_length, 1,
                                                p=sent_len_p)[0]
                    print("change max time: %d" % (max_time))
                    batch_cnt = get_batch_cnt(arr, FLAGS.batch_size, max_time)
                    current_step_batch = 0
                    initial_state = sess.run(model.initial_state)
                    break

                if current_step >= FLAGS.max_train_steps:
                    break
            if current_step >= FLAGS.max_train_steps:
                break

        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
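# get_batch_cnt is not shown above; under the usual contiguous-batching scheme
# it would simply count how many (batch_size x max_time) batches one pass over
# the encoded corpus yields. A guess at its shape, not the actual helper:
def get_batch_cnt_sketch(arr, batch_size, max_time):
    # Full batches per epoch at the current sequence length.
    return len(arr) // (batch_size * max_time)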