Code example #1
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)

    with codecs.open(FLAGS.input_file_vali, encoding='utf-8') as f_v:
        text_v = f_v.read()
    # converter_v = TextConverter(text_v, FLAGS.max_vocab)
    # converter_v.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr_v = converter.text_to_arr(text_v)
    g_v = batch_generator(arr_v, FLAGS.num_seqs, FLAGS.num_steps)

    # print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(g, FLAGS.max_steps, model_path, FLAGS.save_every_n,
                FLAGS.log_every_n, g_v)
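
The FLAGS values used throughout these examples are command-line flags defined elsewhere in each project's train.py. As a rough sketch only, they could be declared with TensorFlow 1.x's tf.app.flags along the following lines; the default values are assumptions for illustration, not taken from any of the projects:

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('name', 'default', 'name of the model directory')
tf.app.flags.DEFINE_string('input_file', '', 'utf-8 encoded training text')
tf.app.flags.DEFINE_integer('max_vocab', 3500, 'maximum vocabulary size')
tf.app.flags.DEFINE_integer('num_seqs', 100, 'sequences per batch')
tf.app.flags.DEFINE_integer('num_steps', 100, 'time steps per sequence')
tf.app.flags.DEFINE_integer('lstm_size', 128, 'hidden units per LSTM cell')
tf.app.flags.DEFINE_integer('num_layers', 2, 'number of LSTM layers')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'learning rate')
tf.app.flags.DEFINE_float('train_keep_prob', 0.5, 'dropout keep probability')
tf.app.flags.DEFINE_boolean('use_embedding', False, 'use an embedding layer')
tf.app.flags.DEFINE_integer('embedding_size', 128, 'embedding dimension')
tf.app.flags.DEFINE_integer('max_steps', 10000, 'total training steps')
tf.app.flags.DEFINE_integer('save_every_n', 1000, 'checkpoint interval in steps')
tf.app.flags.DEFINE_integer('log_every_n', 10, 'logging interval in steps')

if __name__ == '__main__':
    tf.app.run()  # parses the flags and then calls main(_)
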
Code example #2
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size
                    )
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                )
Code example #3
File: train.py Project: cwxcode/Char-RNN
def main(_):
    model_path = os.path.join('model', FLAGS.name)  # path where the model is saved
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    # codecs.open lets us specify the file encoding; the text is decoded to unicode automatically on read
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()  # read the training text
    converter = TextConverter(text, FLAGS.max_vocab)  # build the text converter
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)  # convert the text into an index array
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # batch generator
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,  # build the model
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size
                    )
    model.train(g,  # train
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                )
Code example #4
def main(_):
    model_path = os.path.join('model', Config.file_name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)

    et = TextConverter(text=None,
                       save_dir='model/en_vocab.pkl',
                       max_vocab=Config.en_vocab_size,
                       seq_length=Config.seq_length)
    zt = TextConverter(text=None,
                       save_dir='model/zh_vocab.pkl',
                       max_vocab=Config.zh_vocab_size,
                       seq_length=Config.seq_length +
                       1)  # +1 because the decoder sequence is split into input=[:-1] and label=[1:]
    print('english vocab lens:', et.vocab_size)
    print('chinese vocab lens:', zt.vocab_size)

    en_arrs = et.get_en_arrs('data/train.tags.data.en_clear')
    zh_arrs = zt.get_en_arrs('data/train.tags.data.zh_clear')

    train_g = batch_generator(en_arrs, zh_arrs, Config.batch_size)

    # load the most recently saved checkpoint, if any
    model = Model(Config)
    checkpoint_path = tf.train.latest_checkpoint(model_path)
    if checkpoint_path:
        model.load(checkpoint_path)

    print('start to training...')
    model.train(train_g, model_path)
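
The "+1" on the Chinese-side sequence length exists because the decoder sequence is later split into a model input and a training label that are offset by one token. A minimal illustration of that split (the variable names below are illustrative, not from the project):

# zh_seq holds Config.seq_length + 1 token ids, e.g. [<GO>, t1, t2, ..., tN]
decoder_input = zh_seq[:-1]  # what the decoder reads at each time step
decoder_label = zh_seq[1:]   # the token it is trained to predict at that step
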
Code example #5
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    #print(model_path)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seq, FLAGS.num_step)
    print(converter.vocab_size)
    model = CharModel(
        converter.vocab_size,
        num_seq=FLAGS.num_seq,
        num_step=FLAGS.num_step,
        lstm_size=FLAGS.lstm_size,
        num_layers=FLAGS.num_layers,
        #learning_rate=FLAGS.learning_rate,
        train_keep_prob=FLAGS.train_keep_prob,
        #use_embedding=FLAGS.use_embedding,
        embedding_size=FLAGS.embedding_size,
        is_Training=True)
    #model.add_placeholder()
    #model.build_lstm()
    #model.build_loss()
    #model.build_optimizer()
    model.train(g, FLAGS.max_steps, model_path)
Code example #6
def main(_):
    model_path = os.path.join('model', FLAGS.name)  # build the path string
    if os.path.exists(model_path) is False:  # create the directory if it is missing
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()  # read the whole file as a single string
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)  # serialize the text into indices
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # 100, 100
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,  # build the model; num_classes is the vocabulary size because the next char is predicted
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size
                    )
    model.train(g,  # train the model
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                )
Code example #7
def main(_):
    script_path = os.path.abspath(os.path.dirname(__file__))
    model_path = os.path.join(script_path, 'model', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    print("corpus size " + str(len(text)))

    if os.path.exists(FLAGS.whitelist_file):
        with codecs.open(FLAGS.whitelist_file, encoding='utf-8') as f:
            whitelist = f.read()
        text = remove_non_matching_chars(text, whitelist)

    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
    model.train(
        g,
        FLAGS.max_steps,
        model_path,
        FLAGS.save_every_n,
        FLAGS.log_every_n,
    )
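
remove_non_matching_chars is project-specific and not shown above; judging from how it is called, it most likely just drops every character that does not occur in the whitelist text. A hypothetical minimal version:

def remove_non_matching_chars(text, whitelist):
    # keep only characters that appear somewhere in the whitelist string
    allowed = set(whitelist)
    return ''.join(ch for ch in text if ch in allowed)
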
Code example #8
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)  # create the model save directory
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()  # read the text
    converter = TextConverter(text, FLAGS.max_vocab)  # build the character mapping
    converter.save_to_file(os.path.join(model_path,
                                        'converter.pkl'))  # save the mapping under model_path

    arr = converter.text_to_arr(text)  # convert the text to ids
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # create batches
    print(converter.vocab_size)
    model = CharRNN(
        converter.vocab_size,  # create the model instance
        num_seqs=FLAGS.num_seqs,
        num_steps=FLAGS.num_steps,
        lstm_size=FLAGS.lstm_size,
        num_layers=FLAGS.num_layers,
        learning_rate=FLAGS.learning_rate,
        train_keep_prob=FLAGS.train_keep_prob,
        use_embedding=FLAGS.use_embedding,
        embedding_size=FLAGS.embedding_size)
    model.train(
        g,  # run training
        FLAGS.max_steps,
        model_path,
        FLAGS.save_every_n,
        FLAGS.log_every_n,
    )
Code example #9
File: train.py Project: hacksman/char_rnn
def main(_):
    # set the model save path
    model_path = os.path.join('model', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    # load the file to train on
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()

    # build the text converter instance
    converter = TextConverter(text, FLAGS.max_vocab)
    # save the converter's serialized data so later stages of the model can reuse it
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    # Convert each character to its index in the vocabulary. For "寒随穷律变,春逐鸟声开。初风飘带柳,晚雪间花梅。", the comma and the
    # full stop rank first and second in the vocabulary, so their indices are 0 and 1, and arr becomes [15 17 12 22 6 0 5 8 18 19 16 1 4 7 2 21 3 9 0 10 11 20 13 14 1]
    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    for x, y in g:
        print(x)
        print(y)
        break
    print("This is vocabulary size length: {}".format(converter.vocab_size))

    # build the model
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size)
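
The long comment above describes what text_to_arr produces: each character is replaced by its position in a frequency-sorted vocabulary. The projects' real TextConverter also handles max_vocab truncation, unknown characters and pickling; the sketch below only captures the indexing idea and is an assumption, not the actual class:

from collections import Counter

import numpy as np


class MinimalTextConverter:
    def __init__(self, text):
        # the most frequent characters receive the smallest indices
        self.vocab = [c for c, _ in Counter(text).most_common()]
        self.char_to_int = {c: i for i, c in enumerate(self.vocab)}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def text_to_arr(self, text):
        return np.array([self.char_to_int[c] for c in text])
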
Code example #10
File: train.py Project: zoulala/ppd_ai_TALKS
def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:',word_char)

    FLAGS.file_name = word_char+'_'+FLAGS.file_name
    print('model_path:',FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)

    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path,save_path = 'data','process_data1'

    converter = TextConverter(word_char, data_path, save_path,  FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        train_pkl = 'train_word.pkl'
        val_pkl = 'val_word.pkl'
    if word_char == 'char':
        train_pkl = 'train_char.pkl'
        val_pkl = 'val_char.pkl'

    train_samples = converter.load_obj(os.path.join(save_path, train_pkl))
    train_g = batch_generator(train_samples, FLAGS.batch_size)

    val_samples = converter.load_obj(os.path.join(save_path, val_pkl))
    val_g = val_samples_generator(val_samples)


    print('use embeding:',FLAGS.use_embedding)
    print('vocab size:',converter.vocab_size)


    model = Model(converter.vocab_size,FLAGS,test=False, embeddings=embeddings)

    # resume training from the last checkpoint
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g
                )
Code example #11
 def test_batch_generator(self):
     with codecs.open('data/shakespeare.txt', encoding='utf-8') as f:
         text = f.read()
     converter = TextConverter(text, 35000)
     arr = converter.text_to_arr(text)
     g = batch_generator(arr, 32, 50)
     count = 0
     for x, y in g:
         count += 1
         print(count)
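
None of the examples show batch_generator itself. Judging from how it is called with (arr, n_seqs, n_steps), a typical char-RNN batching scheme trims the index array to a whole number of batches, reshapes it into n_seqs parallel rows, and slides a window of n_steps columns across it, the target being the input shifted by one character. The function below is a sketch under those assumptions, not the projects' exact code:

import numpy as np


def batch_generator(arr, n_seqs, n_steps):
    batch_size = n_seqs * n_steps
    n_batches = len(arr) // batch_size
    # keep only full batches and lay the data out as n_seqs parallel streams
    arr = np.array(arr[:batch_size * n_batches]).reshape((n_seqs, -1))
    for n in range(0, arr.shape[1], n_steps):
        x = arr[:, n:n + n_steps]
        # the target is x shifted left by one; the last column wraps around to the start
        y = np.zeros_like(x)
        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
        yield x, y

Projects that train for a fixed number of steps often wrap the for loop in while True so the generator never runs out of batches.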
Code example #12
def main(_):
    model_path = os.path.join('model', 'en')
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
    with open("data/shakespeare.txt") as f:
        text = f.read()
    print("=====>", len(text))
    converter = TextConverter(text)
    converter.save(os.path.join(model_path, "converter.pkl"))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, batch_size, seq_len, converter=None)

    model = charRNN(converter.vocab_size)
    
    model.train(g, model_path)
Code example #13
File: train.py Project: LZY2006/Char-RNN-TensorFlow
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    print(model_path)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)
        path_exist = False
    else:
        path_exist = True
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        text = f.read()
    converter = TextConverter(text, FLAGS.max_vocab)
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))

    arr = converter.text_to_arr(text)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    print(converter.vocab_size)
    model = CharRNN(converter.vocab_size,
                    num_seqs=FLAGS.num_seqs,
                    num_steps=FLAGS.num_steps,
                    lstm_size=FLAGS.lstm_size,
                    num_layers=FLAGS.num_layers,
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size
                    )
    model_file_path = tf.train.latest_checkpoint(model_path)
    if path_exist:
        model.load(model_file_path)
        indexes = []
        for dirpath, dirnames, filenames in os.walk(model_path):
            for name in filenames:
                filepath = os.path.join(dirpath, name)
                if filepath.endswith(".index"):
                    indexes.append(int(name[6:-6]))
        indexes.sort()
        last_index = indexes[-1]
        model.step = last_index

    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                )
Code example #14
def main(_):
    model_path = os.path.join('models', FLAGS.name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)

    # excel data
    QAs = get_excel_QAs(
        FLAGS.input_file)  # expects an excel file: first sheet, column 1 id, column 2 query, column 3 response

    # # xhj data
    # from read_utils import loadConversations
    # QAs = loadConversations(FLAGS.input_file)

    text = get_QAs_text(QAs)

    if os.path.exists(os.path.join(model_path, 'converter.pkl')) is False:
        print('Vocabulary file not found, creating...')
        converter = TextConverter(text, FLAGS.max_vocab)
        converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    else:
        converter = TextConverter(
            filename=os.path.join(model_path, 'converter.pkl'))

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)
    samples = converter.samples_for_train(QA_arrs)
    g = batch_generator(samples, FLAGS.num_seqs)

    print(converter.vocab_size)
    model = DualLSTM(converter.vocab_size,
                     batch_size=FLAGS.num_seqs,
                     num_steps=FLAGS.num_steps,
                     lstm_size=FLAGS.lstm_size,
                     num_layers=FLAGS.num_layers,
                     learning_rate=FLAGS.learning_rate,
                     train_keep_prob=FLAGS.train_keep_prob,
                     use_embedding=FLAGS.use_embedding,
                     embedding_size=FLAGS.embedding_size)
    model.train(
        g,
        FLAGS.max_steps,
        model_path,
        FLAGS.save_every_n,
        FLAGS.log_every_n,
    )
Code example #15
def main(_):
    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)

    # rebuild the vocabulary and QA pickles if either file is missing
    if not (os.path.exists(os.path.join(model_path, 'converter.pkl'))
            and os.path.exists(os.path.join(model_path, 'QAs.pkl'))):
        print('Vocabulary files not found, creating...')
        QAs, text = load_origin_data('data/task3_train.txt')
        converter = TextConverter(text, 5000)
        converter.save_to_file(converter.vocab,
                               os.path.join(model_path, 'converter.pkl'))
        converter.save_to_file(QAs, os.path.join(model_path, 'QAs.pkl'))
    else:
        converter = TextConverter(
            filename=os.path.join(model_path, 'converter.pkl'))
        QAs = converter.load_obj(filename=os.path.join(model_path, 'QAs.pkl'))

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)

    thres = int(len(QA_arrs) * 0.9)
    train_samples = QA_arrs[:thres]
    val_samples = QA_arrs[thres:]

    train_g = batch_generator(train_samples, FLAGS.batch_size)
    val_g = val_samples_generator(val_samples)

    print('use embeding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    from model3 import Model
    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=None)

    # resume training from the last checkpoint
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g, FLAGS.max_steps, model_path, FLAGS.save_every_n,
                FLAGS.log_every_n, val_g)
Code example #16
File: train.py Project: zoulala/Seq2seq_couplet
def main(_):

    model_path = os.path.join('models', Config.file_name)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)

    converter = TextConverter(vocab_dir='data/vocabs',
                              max_vocab=Config.vocab_size,
                              seq_length=Config.seq_length)
    print('vocab lens:', converter.vocab_size)

    en_arrs = converter.get_en_arrs('data/train/in.txt')
    de_arrs = converter.get_de_arrs('data/train/out.txt')

    train_g = batch_generator(en_arrs, de_arrs, Config.batch_size)

    # load the most recently saved checkpoint, if any
    model = Model(Config)
    checkpoint_path = tf.train.latest_checkpoint(model_path)
    if checkpoint_path:
        model.load(checkpoint_path)

    print('start to training...')
    model.train(train_g, model_path)
Code example #17
def main(_):
    ## Preprocess the data: the TextConverter class from read_utils.py keeps the most frequent characters and maps each one to an index.
    ## Then batch_generator is called to obtain a batch generator.
    model_path = os.path.join('model', FLAGS.name)  # join the path
    print("Model save path: ", model_path)
    if os.path.exists(model_path) is False:
        os.makedirs(model_path)  # recursively create the directory
    # To read Chinese characters in Python, open the file via codecs with encoding='utf-8'
    with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
        print("Training data source", FLAGS.input_file)
        text = f.read()
    # build the vocabulary
    converter = TextConverter(text, FLAGS.max_vocab)
    # serialize and save the frequency-filtered character set
    converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
    arr = converter.text_to_arr(text)  # get the index of each character
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)  # get a batch generator
    print(converter.vocab_size)  # print the vocabulary size

    ## Once preprocessing is done, build the recurrent network with the CharRNN class from model.py and call train() to train it
    model = CharRNN(converter.vocab_size,  # number of character classes
                    num_seqs=FLAGS.num_seqs,  # sequences per batch
                    num_steps=FLAGS.num_steps,  # characters per sequence
                    lstm_size=FLAGS.lstm_size,  # units per LSTM cell
                    num_layers=FLAGS.num_layers,  # number of RNN layers
                    learning_rate=FLAGS.learning_rate,
                    train_keep_prob=FLAGS.train_keep_prob,
                    use_embedding=FLAGS.use_embedding,
                    embedding_size=FLAGS.embedding_size
                    )
    model.train(g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                )
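
For context, a train.py built this way is launched from the command line, and the flags supply the values used above. A hypothetical invocation (the file names and values are placeholders, not taken from any of these projects):

python train.py \
    --input_file data/poetry.txt \
    --name poetry \
    --num_seqs 100 \
    --num_steps 100 \
    --lstm_size 128 \
    --num_layers 2 \
    --use_embedding=True \
    --learning_rate 0.005 \
    --max_steps 10000
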
Code example #18
def main(_):
    model_path = os.path.join('model', FLAGS.name)
    arr, converter = initialize_converter(model_path)
    g = batch_generator(arr, FLAGS.num_seqs, FLAGS.num_steps)
    model = CharRNN(
        num_classes=converter.vocab_size,
        num_seqs=FLAGS.num_seqs,
        num_steps=FLAGS.num_steps,
        lstm_size=FLAGS.lstm_size,
        num_layers=FLAGS.num_layers,
        learning_rate=FLAGS.learning_rate,
        train_keep_prob=FLAGS.train_keep_prob,
        use_embedding=FLAGS.use_embedding,
        embedding_size=FLAGS.embedding_size,
        text_converter=converter
    )

    model.train(
        g,
        FLAGS.max_steps,
        model_path,
        FLAGS.save_every_n,
        FLAGS.log_every_n
    )
Code example #19
def train():

    with tf.Session() as sess:
        model_path = os.path.join(FLAGS.train_dir, FLAGS.model_name)
        if (not os.path.exists(model_path)):
            os.makedirs(model_path)
        checkpoint_path = os.path.join(model_path, "generate.ckpt")

        with codecs.open(FLAGS.input_file, encoding='utf-8') as f:
            text = f.read()  #.replace("\n", "")
        converter_path = os.path.join(model_path, 'converter.pkl')
        if (not os.path.exists(converter_path)):
            print("construct converter.")
            converter = TextConverter(text, FLAGS.max_vocab_size)
            converter.save_to_file(os.path.join(model_path, 'converter.pkl'))
        else:
            print("load converter")
            converter = TextConverter(None, FLAGS.max_vocab_size,
                                      converter_path)
        print("actual vocabulary size is: " + str(converter.vocab_size))

        arr = converter.text_to_arr(text)
        sent_len_p = [
            1.0 / len(train_sentence_length) for l in train_sentence_length
        ]
        max_time = np.random.choice(train_sentence_length, 1, p=sent_len_p)[0]
        batch_cnt = get_batch_cnt(arr, FLAGS.batch_size, max_time)
        current_step_batch = 0

        # create model
        print("Creating %d layers of %d units for max time %d." %
              (FLAGS.num_layers, FLAGS.lstm_size, max_time))
        model = create_model(sess, converter.vocab_size, False, model_path)
        if (FLAGS.set_learning_rate > 0):
            model.set_learning_rate(sess, FLAGS.set_learning_rate)

        loss_per_checkpoint = 0.0
        current_step = 0
        previous_losses = []
        initial_state = sess.run(model.initial_state)
        while True:
            g = batch_generator(arr, FLAGS.batch_size, max_time)
            for inputs, targets in g:

                start_time = time.time()
                batch_loss, final_state = model.train_step(
                    sess, inputs, targets, initial_state)
                step_time = time.time() - start_time
                loss_per_checkpoint += batch_loss / FLAGS.steps_per_checkpoint
                current_step += 1
                current_step_batch += 1

                if current_step % FLAGS.steps_per_log == 0:
                    perplexity = math.exp(float(
                        batch_loss)) if batch_loss < 300 else float("inf")
                    print(
                        "global step %d learning rate %.4f step-time %.2f perplexity "
                        "%.2f" %
                        (model.global_step.eval(), model.learning_rate.eval(),
                         step_time, perplexity))

                if current_step % FLAGS.steps_per_checkpoint == 0:
                    if len(previous_losses) > 2 and loss_per_checkpoint > max(
                            previous_losses[-3:]) and sess.run(
                                model.learning_rate) >= 0.0002:
                        sess.run(model.learning_rate_decay_op)
                    previous_losses.append(loss_per_checkpoint)
                    loss_per_checkpoint = 0.0
                    model.saver.save(sess,
                                     checkpoint_path,
                                     global_step=model.global_step)

                if current_step_batch % batch_cnt == 0:
                    print("reset initial state")
                    initial_state = sess.run(model.initial_state)
                    current_step_batch = 0
                else:
                    initial_state = final_state

                if current_step % FLAGS.steps_per_sentence_length == 0:
                    max_time = np.random.choice(train_sentence_length,
                                                1,
                                                p=sent_len_p)[0]
                    print("change max time: %d" % (max_time))
                    batch_cnt = get_batch_cnt(arr, FLAGS.batch_size, max_time)
                    current_step_batch = 0
                    initial_state = sess.run(model.initial_state)
                    break

                if current_step >= FLAGS.max_train_steps:
                    break

            if current_step >= FLAGS.max_train_steps:
                break
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)