Example #1
def main(_):

    # TODO: do what you need to load the datasets from FLAGS.data_dir.
    dataset = dict()
    for dataset_type in ['train', 'val']:
        # Each file stores one example per line as whitespace-separated ints.
        # Wrap map() in list() so the data survives repeated iteration under
        # Python 3 (a bare map object is exhausted after one pass).
        with open(os.path.join(FLAGS.data_dir,
                               "%s.ids.context" % dataset_type)) as f:
            data_context = [list(map(int, line.split())) for line in f]
        with open(os.path.join(FLAGS.data_dir,
                               "%s.ids.question" % dataset_type)) as f:
            data_question = [list(map(int, line.split())) for line in f]
        with open(os.path.join(FLAGS.data_dir, "%s.span" % dataset_type)) as f:
            data_span = [list(map(int, line.split())) for line in f]
        dataset[dataset_type] = (data_context, data_question, data_span)

    embed_path = FLAGS.embed_path or pjoin(
        "data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))
    vocab_path = FLAGS.vocab_path or pjoin(FLAGS.data_dir, "vocab.dat")
    vocab, rev_vocab = initialize_vocab(vocab_path)

    # Note: `config` is assumed to be constructed elsewhere in this module.
    encoder = Encoder(size=FLAGS.state_size,
                      vocab_dim=FLAGS.embedding_size,
                      config=config)
    decoder = Decoder(output_size=FLAGS.output_size, config=config)

    qa = QASystem(encoder, decoder, config=config)

    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)

    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)

    with tf.Session() as sess:
        load_train_dir = get_normalized_train_dir(FLAGS.load_train_dir
                                                  or FLAGS.train_dir)
        initialize_model(sess, qa, load_train_dir)

        save_train_dir = get_normalized_train_dir(FLAGS.train_dir)
        qa.train(sess, dataset, save_train_dir)

        qa.evaluate_answer(sess, dataset, FLAGS.evaluate, log=True)
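Each of these examples calls initialize_vocab without defining it. A minimal sketch, assuming vocab.dat stores one token per line with the line number serving as the token id; both assumptions are inferred from how the examples index vocab by word and rev_vocab by id:

def initialize_vocab(vocab_path):
    # One token per line; the line number is the token's id.
    if not os.path.exists(vocab_path):
        raise ValueError("Vocabulary file %s not found." % vocab_path)
    with open(vocab_path) as f:
        rev_vocab = [line.strip() for line in f]
    vocab = {word: idx for idx, word in enumerate(rev_vocab)}
    return vocab, rev_vocab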
Example #2
def run_func():
    config = Config()
    train = squad_dataset(config.question_train, config.context_train,
                          config.answer_train)
    dev = squad_dataset(config.question_dev, config.context_dev,
                        config.answer_dev)
    # print(config.question_train)
    embed_path = config.embed_path
    vocab_path = config.vocab_path
    # print(config.embed_path, config.vocab_path)
    vocab, rev_vocab = initialize_vocab(vocab_path)

    embeddings = get_trimmed_glove_vectors(embed_path)

    encoder = Encoder(config.hidden_size)
    decoder = Decoder(config.hidden_size)

    qa = QASystem(encoder, decoder, embeddings, config)

    with tf.Session() as sess:
        # Load a pretrained model if one exists; otherwise create a new one.
        qa.initialize_model(sess, config.train_dir)
        # train process
        # qa.train(sess, [train, dev], config.train_dir)
        # em = qa.evaluate_model(sess, dev)

        # run process
        while True:
            question = input('please input question: ')
            if question == 'exit':
                break
            raw_context = input('please input context: ')
            if raw_context == 'exit':
                break
            # Map tokens to ids; 2 is presumably the UNK id in this vocab.
            question = [vocab.get(x, 2) for x in question.split()]
            context = [vocab.get(x, 2) for x in raw_context.split()]
            # Dummy span label; presumably only the question and context are
            # used when decoding an answer.
            test = [[question], [context], [[1, 2]]]
            a_s, a_e = qa.answer(sess, test)
            start, end = a_s[0], a_e[0]
            tokens = raw_context.split()
            # The slice also covers the single-token case (start == end).
            print("answer: ", ' '.join(tokens[start:end + 1]))
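Example #2's get_trimmed_glove_vectors is also left undefined. A minimal sketch, assuming the trimmed GloVe matrix was saved with np.savez under the key "glove" (the key name is an assumption):

import numpy as np

def get_trimmed_glove_vectors(embed_path):
    # An .npz archive loads as a dict-like object of named arrays.
    with np.load(embed_path) as data:
        return data["glove"]  # assumed key; adjust to whatever np.savez used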
Example #3
def main(_):
    # Do what you need to load datasets from FLAGS.data_dir
    datasetTrain = initialize_datasets(FLAGS.data_dir,
                                       'train.',
                                       debugMode=False)
    datasetVal = initialize_datasets(FLAGS.data_dir, 'val.', debugMode=False)
    datasetTrain.extend(datasetVal)

    embed_path = FLAGS.embed_path or pjoin(
        "data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))
    vocab_path = FLAGS.vocab_path or pjoin(FLAGS.data_dir, "vocab.dat")
    vocab, rev_vocab = initialize_vocab(vocab_path)

    encoder = Encoder(size=FLAGS.state_size, vocab_dim=FLAGS.embedding_size)
    decoder = Decoder(output_size=FLAGS.output_size)

    # Building the model can take a long time.
    tic = datetime.now()
    qa = QASystem(encoder, decoder, embed_path, FLAGS, rev_vocab)
    print('Time to set up the model: ', datetime.now() - tic)

    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)

    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)

    #saver = tf.train.Saver()

    with tf.Session() as sess:
        load_train_dir = get_normalized_train_dir(FLAGS.load_train_dir
                                                  or FLAGS.train_dir)
        initialize_model(sess, qa, load_train_dir)

        # Get directory to save model
        #save_train_dir = get_normalized_train_dir(FLAGS.train_dir)
        results_path = "results/{:%Y%m%d_%H%M%S}/".format(datetime.now())
        save_train_dir = results_path + "model.weights/"
        if not os.path.exists(save_train_dir):
            os.makedirs(save_train_dir)

        qa.train(sess, datasetTrain, save_train_dir)  #, saver)

        qa.evaluate_answer(sess, datasetVal, rev_vocab, sample=1000, log=True)
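initialize_model(sess, qa, load_train_dir) appears in several of these examples without a definition. A plausible sketch of the standard TF1 restore-or-initialize pattern, assuming the model object exposes a tf.train.Saver as model.saver (that attribute is an assumption):

def initialize_model(session, model, train_dir):
    # Restore the newest checkpoint if one exists; otherwise initialize
    # all variables from scratch.
    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        session.run(tf.global_variables_initializer())
    return model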
Example #4
def main(_):
    # Do what you need to load datasets from FLAGS.data_dir
    dataset = load_dataset()

    embed_path = FLAGS.embed_path or pjoin(
        "data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))
    vocab_path = FLAGS.vocab_path or pjoin(FLAGS.data_dir, "vocab.dat")
    vocab, rev_vocab = initialize_vocab(vocab_path)

    encoder = Encoder(state_size=FLAGS.state_size,
                      embedding_size=FLAGS.embedding_size,
                      output_size=FLAGS.output_size)
    decoder = Decoder(state_size=FLAGS.state_size,
                      output_size=FLAGS.output_size)

    qa_args = {
        "embed_path": embed_path,
        "embedding_size": FLAGS.embedding_size,
        "output_size": FLAGS.output_size,
        "optimizer": FLAGS.optimizer,
        "learning_rate": FLAGS.learning_rate,
        "epochs": FLAGS.epochs,
        "batch_size": FLAGS.batch_size,
        "max_gradient_norm": FLAGS.max_gradient_norm,
        "dropout_keep_prob": 1.0 - FLAGS.dropout,
        "train_dir": FLAGS.train_dir,
        "state_size": FLAGS.state_size
    }
    qa = QASystem(encoder, decoder, **qa_args)

    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)

    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)

    with tf.Session() as sess:
        load_train_dir = get_normalized_train_dir(FLAGS.load_train_dir
                                                  or FLAGS.train_dir)
        initialize_model(sess, qa, load_train_dir)

        save_train_dir = get_normalized_train_dir(FLAGS.train_dir)
        qa.train(sess, dataset, save_train_dir)
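Most of the examples route directories through get_normalized_train_dir before loading and saving. One plausible implementation, assuming the intent is to expose the train directory behind a fixed symlink so absolute checkpoint paths stay stable across runs (the /tmp location is an assumption):

def get_normalized_train_dir(train_dir):
    # Re-point a fixed symlink at train_dir and return the symlink, so
    # checkpoint paths recorded inside checkpoints remain valid even if
    # train_dir moves.
    global_train_dir = '/tmp/squad-train'
    if os.path.islink(global_train_dir):
        os.unlink(global_train_dir)
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    os.symlink(os.path.abspath(train_dir), global_train_dir)
    return global_train_dir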
Example #5
def main(_):

    # Do what you need to load datasets from FLAGS.data_dir
    dataset = []
    dataset.append(pjoin(FLAGS.data_dir, "train.ids.question"))
    dataset.append(pjoin(FLAGS.data_dir, "train.ids.context"))
    dataset.append(pjoin(FLAGS.data_dir, "train.span"))
    dataset.append(pjoin(FLAGS.data_dir, "val.ids.question"))
    dataset.append(pjoin(FLAGS.data_dir, "val.ids.context"))
    dataset.append(pjoin(FLAGS.data_dir, "val.span"))

    embed_path = FLAGS.embed_path or pjoin(
        "data", "squad", "glove.trimmed.{}.npz".format(FLAGS.embedding_size))
    vocab_path = FLAGS.vocab_path or pjoin(FLAGS.data_dir, "vocab.dat")
    vocab, rev_vocab = initialize_vocab(vocab_path)

    encoder = Encoder(size=FLAGS.state_size, vocab_dim=FLAGS.embedding_size)
    decoder = Decoder(output_size=FLAGS.paragraph_output_size)

    qa = QASystem(encoder, decoder)

    if not os.path.exists(FLAGS.log_dir):
        os.makedirs(FLAGS.log_dir)
    file_handler = logging.FileHandler(pjoin(FLAGS.log_dir, "log.txt"))
    logging.getLogger().addHandler(file_handler)

    print(vars(FLAGS))
    with open(os.path.join(FLAGS.log_dir, "flags.json"), 'w') as fout:
        json.dump(FLAGS.__flags, fout)

    with tf.Session() as sess:
        load_train_dir = get_normalized_train_dir(FLAGS.load_train_dir
                                                  or FLAGS.train_dir)
        initialize_model(sess, qa, load_train_dir)

        save_train_dir = get_normalized_train_dir(FLAGS.train_dir)
        qa.train(sess, dataset, save_train_dir)
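Unlike Example #1, which materializes every file eagerly, this last example passes only file paths and leaves the reading to QASystem.train. A hypothetical helper (read_ids_file is not in the original) that such a training loop could use, following the same line format Example #1 parses, i.e. whitespace-separated integers per line:

def read_ids_file(path):
    # Works for both .ids.* files (token ids) and .span files
    # (answer start/end indices).
    with open(path) as f:
        return [list(map(int, line.split())) for line in f]

For instance, read_ids_file(pjoin(FLAGS.data_dir, "train.span")) would yield one [start, end] pair per training example.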