Example #1
import os
import tensorflow as tf
# TextConverter, batch_generator and val_samples_generator are assumed to be
# imported from the project's data-reading helper module (not shown here).

def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char+'_'+FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # select the model variant from the trailing digit of the file name
    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path, save_path = 'data', 'process_data1'

    converter = TextConverter(word_char, data_path, save_path, FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        train_pkl = 'train_word.pkl'
        val_pkl = 'val_word.pkl'
    elif word_char == 'char':
        train_pkl = 'train_char.pkl'
        val_pkl = 'val_char.pkl'

    train_samples = converter.load_obj(os.path.join(save_path, train_pkl))
    train_g = batch_generator(train_samples, FLAGS.batch_size)

    val_samples = converter.load_obj(os.path.join(save_path, val_pkl))
    val_g = val_samples_generator(val_samples)


    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)


    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=embeddings)

    # resume training from the latest checkpoint, if one exists
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g)
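
Neither example defines FLAGS itself; both assume a tf.app.flags configuration created elsewhere in the script. The following is a minimal sketch of what that setup might look like under TensorFlow 1.x. The flag names are the ones the two main functions reference; the default values and help strings are illustrative assumptions, not the project's actual settings.

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

# flag names taken from the code above; defaults are assumptions
tf.app.flags.DEFINE_string('file_name', 'model1', 'suffix used for the model directory')
tf.app.flags.DEFINE_integer('num_steps', 20, 'sequence length fed to the model')
tf.app.flags.DEFINE_integer('batch_size', 64, 'training batch size')
tf.app.flags.DEFINE_integer('max_steps', 10000, 'total number of training steps')
tf.app.flags.DEFINE_integer('save_every_n', 1000, 'checkpoint interval in steps')
tf.app.flags.DEFINE_integer('log_every_n', 100, 'logging interval in steps')
tf.app.flags.DEFINE_boolean('use_embedding', True, 'use pre-trained embeddings')
tf.app.flags.DEFINE_string('checkpoint_path', '', 'checkpoint to restore from')

if __name__ == '__main__':
    tf.app.run()  # parses the command line into FLAGS, then calls main(_)

tf.app.run() passes the leftover argv to main, which is why both entry points take a single throwaway argument.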
Example #2
import os
import tensorflow as tf
# TextConverter and test_samples_generator are assumed to come from the same
# data-reading helper module as in Example #1.

def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char + '_' + FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.isdir(model_path):
        FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)

    # select the model variant from the trailing digit of the file name
    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path, save_path = 'data', 'process_data1'

    converter = TextConverter(word_char, data_path, save_path, FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        test_pkl = 'test_word.pkl'
    elif word_char == 'char':
        test_pkl = 'test_char.pkl'

    test_samples = converter.load_obj(os.path.join(save_path, test_pkl))

    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    with open(os.path.join(model_path, 'submission.csv'), 'w') as file:
        file.write('y_pre' + '\n')
    for i in range(0, len(test_samples), 5000):  # not enough memory: run the test in batches of 5000
        print('>>>>:', i, '/', len(test_samples))
        test_g = test_samples_generator(test_samples[i:i + 5000])

        # the model is rebuilt and its checkpoint reloaded for every batch
        model = Model(converter.vocab_size,
                      FLAGS,
                      test=False,
                      embeddings=embeddings)

        model.load(FLAGS.checkpoint_path)

        model.test(test_g, model_path)
    print('finished!')
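
Both examples call converter.load_obj to read the preprocessed .pkl sample files, but its implementation is not shown. A minimal sketch of such a helper, assuming the files are ordinary pickled Python objects; this body is an assumption, not the project's actual code.

import pickle

def load_obj(path):
    # assumed behaviour: the .pkl files written by the preprocessing step
    # are plain pickled objects (e.g. lists of samples)
    with open(path, 'rb') as f:
        return pickle.load(f)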