Example #1
# Assumed imports for this excerpt; FLAGS is assumed to be defined via tf.flags
# elsewhere in the script, and batch_generator is assumed to live in read_utils
# alongside TextConverter and val_samples_generator (cf. Example #4).
import os
import tensorflow as tf
from read_utils import TextConverter, batch_generator, val_samples_generator
def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char + '_' + FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path, save_path = 'data', 'process_data1'

    converter = TextConverter(word_char, data_path, save_path, FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        train_pkl = 'train_word.pkl'
        val_pkl = 'val_word.pkl'
    elif word_char == 'char':
        train_pkl = 'train_char.pkl'
        val_pkl = 'val_char.pkl'

    train_samples = converter.load_obj(os.path.join(save_path, train_pkl))
    train_g = batch_generator(train_samples, FLAGS.batch_size)

    val_samples = converter.load_obj(os.path.join(save_path, val_pkl))
    val_g = val_samples_generator(val_samples)


    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)


    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=embeddings)

    # Resume training from the previous checkpoint, if one exists
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g
                )
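
The training driver above depends on a batch_generator helper from read_utils whose source is not shown. Below is a minimal sketch of what such a generator could look like, assuming it must yield shuffled mini-batches indefinitely (the loop is bounded by FLAGS.max_steps, not by epochs); this is a hypothetical reconstruction, not the project's actual code:

import random

def batch_generator(samples, batch_size):
    # Hypothetical sketch: reshuffle every pass and yield mini-batches
    # forever, matching a training loop bounded by max_steps.
    samples = list(samples)
    while True:
        random.shuffle(samples)
        for start in range(0, len(samples) - batch_size + 1, batch_size):
            yield samples[start:start + batch_size]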
Example #2
# Assumed imports for this excerpt; FLAGS and the helper locations are
# assumptions, as in Example #1.
import os
import tensorflow as tf
from read_utils import TextConverter, test_samples_generator
def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char + '_' + FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.isdir(model_path):
        FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)

    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path, save_path = 'data', 'process_data1'

    converter = TextConverter(word_char, data_path, save_path, FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        test_pkl = 'test_word.pkl'
    elif word_char == 'char':
        test_pkl = 'test_char.pkl'

    test_samples = converter.load_obj(os.path.join(save_path, test_pkl))

    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    with open(os.path.join(model_path, 'submission.csv'), 'w') as file:
        file.write('y_pre\n')
    for i in range(0, len(test_samples), 5000):  # test in batches; the full set does not fit in memory
        print('>>>>:', i, '/', len(test_samples))
        test_g = test_samples_generator(test_samples[i:i + 5000])

        model = Model(converter.vocab_size,
                      FLAGS,
                      test=False,
                      embeddings=embeddings)

        model.load(FLAGS.checkpoint_path)

        model.test(test_g, model_path)
    print('finished!')
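
Slicing test_samples into fixed-size chunks keeps peak memory bounded during inference. The same pattern can be factored into a small generic helper (a sketch, not part of the original code):

def chunks(seq, size):
    # Yield successive slices of at most `size` items from seq.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

# Usage equivalent to the loop above:
# for part in chunks(test_samples, 5000):
#     test_g = test_samples_generator(part)

Note that the loop above also rebuilds the Model and reloads the checkpoint for every chunk; if the Model class allows it, constructing the model and loading the checkpoint once before the loop would avoid repeated graph construction.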
Example #3
# Assumed imports for this excerpt; FLAGS and the helper locations are
# assumptions, as in the previous examples. load_origin_data is assumed to be
# defined in a local data-loading module.
import os
import tensorflow as tf
from read_utils import TextConverter, batch_generator, val_samples_generator
def main(_):
    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # The original condition was inverted by operator precedence; per the else
    # branch, the intent is to build the files when either one is missing.
    if not (os.path.exists(os.path.join(model_path, 'converter.pkl'))
            and os.path.exists(os.path.join(model_path, 'QAs.pkl'))):
        print('Vocabulary files not found, creating...')
        QAs, text = load_origin_data('data/task3_train.txt')
        converter = TextConverter(text, 5000)
        converter.save_to_file(converter.vocab,
                               os.path.join(model_path, 'converter.pkl'))
        converter.save_to_file(QAs, os.path.join(model_path, 'QAs.pkl'))
    else:
        converter = TextConverter(
            filename=os.path.join(model_path, 'converter.pkl'))
        QAs = converter.load_obj(filename=os.path.join(model_path, 'QAs.pkl'))

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)

    thres = int(len(QA_arrs) * 0.9)
    train_samples = QA_arrs[:thres]
    val_samples = QA_arrs[thres:]

    train_g = batch_generator(train_samples, FLAGS.batch_size)
    val_g = val_samples_generator(val_samples)

    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    from model3 import Model
    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=None)

    # Resume training from the previous checkpoint, if one exists
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g, FLAGS.max_steps, model_path, FLAGS.save_every_n,
                FLAGS.log_every_n, val_g)
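
save_to_file and load_obj are assumed to be thin pickle wrappers on TextConverter. Here is a minimal sketch consistent with how they are called above (hypothetical; the real methods live in the TextConverter class):

import pickle

def save_to_file(obj, filename):
    # Serialize any Python object (vocab list, QA pairs) to disk.
    with open(filename, 'wb') as f:
        pickle.dump(obj, f)

def load_obj(filename):
    # Inverse of save_to_file: restore a pickled object.
    with open(filename, 'rb') as f:
        return pickle.load(f)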
Example #4
if __name__ == "__main__":
    import os  # needed for os.path.join below; WordVec is assumed to be defined earlier in this script

    from sklearn.metrics import log_loss

    logloss = log_loss([0, 1, 1, 1, 1], [0, 1, 1, 1, 1], eps=1e-15)
    print('logloss:', logloss)

    from read_utils import TextConverter, val_samples_generator

    data_path, save_path = 'data', 'process_data'
    converter = TextConverter(data_path, save_path, 20)
    embeddings = converter.embeddings
    ww = WordVec(embeddings)

    val_samples = converter.load_obj(os.path.join(save_path, 'train_word.pkl'))
    val_g = val_samples_generator(val_samples[40000:80000])

    q_val, q_len, r_val, r_len, y = val_g

    embed_query_seqs = ww.sens_to_embed(q_val)
    embed_respones_seqs = ww.sens_to_embed(r_val)

    assert embed_query_seqs.shape[0] == embed_respones_seqs.shape[0], 'not equal'

    n = embed_query_seqs.shape[0]

    print('start dot.')
    y_pre = []
    for i in range(n):
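
The excerpt ends mid-loop. Given the 'start dot.' message and the per-pair y_pre list, one plausible continuation scores each (query, response) pair by the dot product of its averaged sequence embeddings. This is an illustrative sketch only, not the original code; the mean-pooling step and numpy (as np) availability are assumptions:

    # Hypothetical continuation of the truncated loop above.
    import numpy as np
    for i in range(n):
        q_vec = np.mean(embed_query_seqs[i], axis=0)     # pool query embeddings
        r_vec = np.mean(embed_respones_seqs[i], axis=0)  # pool response embeddings
        y_pre.append(float(np.dot(q_vec, r_vec)))        # similarity score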