Example 1
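Trains a GA Reader with dataset-specific preprocessing (CLiCR, CLiCR-novice, or the default pipeline), validating every VALIDATION_FREQ iterations, checkpointing the best model, annealing the learning rate after epoch 2, and stopping early once validation accuracy stops improving.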
def main(save_path, params):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    ent_setup = params['ent_setup']  # ent, ent-anonym, no-ent
    data_path = params['data_path']
    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_chars = char_dim > 0

    if dataset == "clicr":
        dp = DataPreprocessor.DataPreprocessorClicr()
        data = dp.preprocess(
            #"/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/",
            data_path,
            ent_setup=ent_setup,
            no_training_set=False,
            use_chars=use_chars)
    elif dataset == "clicr_novice":
        dp = DataPreprocessor.DataPreprocessorNovice()
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=False,
                             use_chars=use_chars)
    else:
        dp = DataPreprocessor.DataPreprocessor()
        data = dp.preprocess(data_path,
                             no_training_set=False,
                             use_chars=use_chars)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1.0)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GAReader.Model(nlayers, data.vocab_size, data.num_chars, W_init,
                       nhidden, embed_dim, dropout, train_emb, char_dim,
                       use_feat, gating_fn)

    print("training ...")
    num_iter = 0
    max_acc = 0.
    deltas = []

    logger = open(save_path + '/log', 'a')

    if os.path.isfile('%s/best_model.p' % save_path):
        print('loading previously saved model')
        m.load_model('%s/best_model.p' % save_path)
    else:
        print('saving init model')
        m.save_model('%s/model_init.p' % save_path)
        print('loading init model')
        m.load_model('%s/model_init.p' % save_path)

    for epoch in range(NUM_EPOCHS):
        estart = time.time()
        new_max = False

        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt,
                                          tm, m_c, cl)

            message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
                epoch, loss, tr_acc, time.time() - estart)
            print(message)
            logger.write(message + '\n')

            num_iter += 1
            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm,
                                      m_c, cl)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize * loss
                    total_acc += bsize * acc
                    n += bsize
                val_acc = total_acc / n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p' % save_path)
                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                    epoch, total_loss / n, val_acc, max_acc)
                print(message)
                logger.write(message + '\n')

        # m.save_model('%s/model_%d.p'%(save_path,epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
            epoch, tr_acc, val_acc)
        print(message)
        logger.write(message + '\n')

        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
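All of these training entry points share the same shape: a main(save_path, params) function driven by module-level constants from config.py (BATCH_SIZE, NUM_EPOCHS, VALIDATION_FREQ, ...). A minimal sketch of how Example 1 might be invoked follows; only the dict keys come from the code above, while every value and path is an illustrative assumption:

import os

# Hypothetical settings; only the key names are taken from Example 1.
params = {
    'nhidden': 128,
    'dropout': 0.2,
    'word2vec': 'embeddings/word2vec.bin',
    'dataset': 'clicr',
    'nlayers': 3,
    'train_emb': 1,
    'char_dim': 25,          # > 0 switches on character embeddings
    'use_feat': 0,
    'gating_fn': 'T.mul',
    'ent_setup': 'ent',      # one of: ent, ent-anonym, no-ent
    'data_path': 'data/',
}

save_path = 'experiments/ga_clicr'
os.makedirs(save_path, exist_ok=True)  # main() copies config.py into it
main(save_path, params)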
Example 2
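Evaluates a saved model on the test or validation split, accumulating loss and accuracy and dumping per-question probabilities, one stored attention per batch, and the question ids.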
def main(load_path, params, mode='test'):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['data']
    nlayers = params['nlayers']
    sub2vec = params['sub2vec']
    train_emb = params['train_emb']
    sub_dim = params['sub_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    use_subs = sub_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=True, use_subs=use_subs)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    S_init, sub_dim = Helpers.load_sub_embeddings(data.dictionary[1], sub2vec)
    m = model.Model(nlayers,
                    data.vocab_size,
                    data.num_chars,
                    W_init,
                    S_init,
                    nhidden,
                    embed_dim,
                    dropout,
                    train_emb,
                    sub_dim,
                    use_feat,
                    gating_fn,
                    save_attn=True)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention
        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc
        pr[n:n + bsize, :] = probs  # fill the probability matrix saved below
        fids += fnames
        n += bsize
        print("step %d, acc %.4f" % (n, acc))

    logger = open(load_path + '/log', 'a')
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'wb'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example 3
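A tf.keras variant: builds and compiles the GA Reader once, then trains it batch by batch with train_on_batch while writing summaries for TensorBoard.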
def main(save_path, params):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    out = 'out'

    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_chars = char_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=False, use_chars=use_chars)
    word_dictionary = data.dictionary[0]

    idx_to_word = {v: k for (k, v) in word_dictionary.items()}
    words = [idx_to_word[i] for i in sorted(idx_to_word.keys())]

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1.0)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)

    print("running GAReader ...")

    m = GAReader.Model(nlayers, data.vocab_size, data.num_chars, W_init,
                       nhidden, embed_dim, dropout, train_emb, char_dim,
                       use_feat, gating_fn, words).build_network()
    m.compile(optimizer=tf.keras.optimizers.Adam(lr=LEARNING_RATE,
                                                 clipnorm=GRAD_CLIP),
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=[tf.keras.metrics.categorical_accuracy])
    # The session runs in the default graph, where the model above was built;
    # creating a separate tf.Graph() here would orphan the compiled model.
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        K.set_session(sess)
        tensorboard = TensorBoardCustom(log_dir="logs", words=words)
        modelcheckpoint = tf.keras.callbacks.ModelCheckpoint(
            'output/weights.{epoch:02d}-{val_loss:.2f}.hdf5')
        writer = tf.summary.FileWriter("logs")

        def schedule(epoch, lr):
            # halve the learning rate from the fourth epoch on
            if epoch >= 3:
                return lr * 0.5
            else:
                return lr

        lrate = LearningRateScheduler(schedule, verbose=1)

        # Create the summary ops once; building them inside the batch loop
        # would add new nodes to the graph on every iteration.
        tf.summary.scalar('learning_rate', LEARNING_RATE)
        summary_op = tf.summary.merge_all()

        for epoch in range(NUM_EPOCHS):
            for (inputs, a) in batch_loader_train:
                [dw, qw, m_dw, m_qw, c, m_c, cl] = inputs
                # Reuse the model compiled above; rebuilding and recompiling
                # it here would reset its weights on every batch.
                train_summary = m.train_on_batch(
                    inputs,
                    to_categorical(a, batch_loader_train.max_num_cand))
                print('epoch: {}, train loss: {}, train acc: {}'.format(
                    epoch, train_summary[0], train_summary[1]))
                writer.add_summary(sess.run(summary_op))
        writer.close()
Example 4
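Training with a regularizer and subsampling, using a train_cut fraction of the training set; each time validation accuracy reaches a new maximum, the test set is scored as well.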
def main(save_path, params):

    regularizer = params['regularizer']
    rlambda = params['lambda']
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    subsample = params['subsample']
    base_model = params['model']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    train_cut = params['train_cut']
    gating_fn = params['gating_fn']

    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_chars = char_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, use_chars=use_chars)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training, BATCH_SIZE, data.dictionary,
            sample=train_cut, max_qry_len=85)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation, BATCH_SIZE, data.dictionary, max_qry_len=85)
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(data.test, BATCH_SIZE, data.dictionary)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(data.dictionary[0], word2vec)
    m = eval(base_model).Model(nlayers, data.vocab_size, data.num_chars, W_init,
                               regularizer, rlambda, nhidden, embed_dim, dropout,
                               train_emb, subsample, char_dim, use_feat,
                               data.dictionary[4])

    print("training ...")
    num_iter = 0
    max_acc = 0.
    deltas = []
    test_acc = 0.

    logger = open(save_path + '/log', 'a')

    for epoch in range(NUM_EPOCHS):
        estart = time.time()
        new_max = False

        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)

            # message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
            #         epoch, loss, tr_acc, time.time()-estart)
            # print message
            # logger.write(message+'\n')

            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, 
                            m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize*loss
                    total_acc += bsize*acc
                    n += bsize

                val_acc = total_acc/n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p'%save_path)

                    temp_acc, temp_n = 0.0, 0

                    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_test:
                        outs = m.validate(dw, dt, qw, qt, c, a, 
                            m_dw, m_qw, tt, tm, m_c, cl, match_feat, use_char, use_char_q)
                        _, acc, _ = outs[:3]
                        bsize = dw.shape[0]
                        temp_acc += bsize * acc
                        temp_n += bsize

                    test_acc = temp_acc / temp_n

                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f test=%.4f" % (
                    epoch, total_loss/n, val_acc, max_acc, test_acc)
                print message
                logger.write(message+'\n')

            num_iter += 1

        m.save_model('%s/model_%d.p' % (save_path, epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (epoch, tr_acc, val_acc)
        print(message)
        logger.write(message + '\n')
        
        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
Example 5
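The evaluation counterpart of Example 4: restores the saved config, loads the best model, and writes probabilities, attentions, and question ids.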
def main(load_path, params, mode='test'):

    regularizer = params['regularizer']
    rlambda = params['lambda']
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    subsample = params['subsample']
    base_model = params['model']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    # load settings
    shutil.copyfile('%s/config.py' % load_path, 'config.py')

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE, data.dictionary)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE, data.dictionary)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = eval(base_model).Model(nlayers, data.vocab_size, data.num_chars,
                               W_init, regularizer, rlambda, nhidden,
                               embed_dim, dropout, train_emb, subsample,
                               char_dim, use_feat, data.dictionary[4])
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames, match_feat, use_char, use_char_q in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl,
                          match_feat, use_char, use_char_q)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

    logger = open(load_path + '/log', 'a')
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'wb'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example 6
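Evaluation for the CLiCR setups: maps each predicted candidate back to an answer string (de-anonymizing entities when needed), writes a predictions file, and scores it with an external evaluator.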
def main(load_path, params, mode='test'):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']
    ent_setup = params['ent_setup']
    data_path = params['data_path']
    # save settings
    shutil.copyfile('config.py', '%s/config_test.py' % load_path)
    use_chars = char_dim > 0

    if dataset == "clicr":
        dp = DataPreprocessor.DataPreprocessorClicr()
        #dataset_path = "/mnt/b5320167-5dbd-4498-bf34-173ac5338c8d/Datasets/bmj_case_reports_data/dataset_json_concept_annotated/"
        #dataset_path = "data/"
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=True)
    elif dataset == "clicr_novice":
        dp = DataPreprocessor.DataPreprocessorNovice()
        data = dp.preprocess(data_path,
                             ent_setup=ent_setup,
                             no_training_set=True)
    else:
        dp = DataPreprocessor.DataPreprocessor()
        data = dp.preprocess(data_path, no_training_set=True)
    inv_vocab = data.inv_dictionary

    assert os.path.exists(
        params["test_file"] if mode == "test" else params["validation_file"])

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)
    f_to_cand = {i[-1]: i[3] for i in batch_loader_test.questions}

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GAReader.Model(nlayers,
                       data.vocab_size,
                       data.num_chars,
                       W_init,
                       nhidden,
                       embed_dim,
                       dropout,
                       train_emb,
                       char_dim,
                       use_feat,
                       gating_fn,
                       save_attn=False)
    print("model load path")
    print('%s/best_model.p' % load_path)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    pred_ans = {}
    total_loss, total_acc, n = 0., 0., 0
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        for f in range(len(fnames)):
            pred_cand = probs[f].argmax()
            pred_a_ids = f_to_cand[fnames[f]][pred_cand]
            pred_a = " ".join([inv_vocab[i] for i in pred_a_ids])
            if ent_setup == "ent-anonym" and (dataset == "clicr"
                                              or dataset == "clicr_novice"):
                relabeling_dicts = data.test_relabeling_dicts if mode == 'test' else data.val_relabeling_dicts
                pred_a = relabeling_dicts[fnames[f]][pred_a]
            pred_ans[fnames[f]] = pred_a

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

    if (params["dataset"] == "clicr" or params["dataset"] == "clicr_plain" or params["dataset"] == "clicr_novice") \
            and (mode == 'test' or mode == 'validation'):
        print("writing predictions")
        preds_data = utils.to_output_preds(pred_ans)
        preds_filepath = load_path + '/{}.preds'.format(mode)
        utils.write_preds(preds_data, file_name=preds_filepath)
        utils.external_eval(preds_filepath,
                            preds_filepath + ".scores",
                            params["test_file"]
                            if mode == "test" else params["validation_file"],
                            extended=True)
    logger = open(load_path + '/log.test', 'a')
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pickle.dump(attns, open('%s/%s.attns' % (load_path, mode), 'wb'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()
Example 7
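A training variant that loads subword embeddings (sub2vec) alongside word2vec and timestamps its progress messages.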
def main(save_path, params):
    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    sub2vec = params['sub2vec']
    subdict = params['subdic']
    dataset = params['data']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    sub_dim = params['sub_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    # save settings
    shutil.copyfile('config.py', '%s/config.py' % save_path)

    use_subs = sub_dim > 0
    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset,
                         no_training_set=False,
                         use_subs=use_subs,
                         subdict=subdict)

    print "building minibatch loaders ...", datetime.now().strftime(
        '%Y-%m-%d %H:%M:%S')
    batch_loader_train = MiniBatchLoader.MiniBatchLoader(data.training,
                                                         BATCH_SIZE,
                                                         sample=1)
    batch_loader_val = MiniBatchLoader.MiniBatchLoader(data.validation,
                                                       BATCH_SIZE)

    print "building network ...", datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    W_init, embed_dim, = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    S_init, sub_dim = Helpers.load_sub_embeddings(data.dictionary[1], sub2vec)
    m = model.Model(nlayers, data.vocab_size, data.num_chars, W_init, S_init,
                    nhidden, embed_dim, dropout, train_emb, sub_dim, use_feat,
                    gating_fn)

    print "training ...", datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    num_iter = 0
    max_acc = 0.
    deltas = []

    logger = open(save_path + '/log', 'a')

    if os.path.isfile('%s/best_model.p' % save_path):
        print('loading previously saved model',
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.load_model('%s/best_model.p' % save_path)
        print("model loaded")
    else:
        print('saving init model', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.save_model('%s/model_init.p' % save_path)
        print('loading init model',
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        m.load_model('%s/model_init.p' % save_path)
    for epoch in range(NUM_EPOCHS):
        print("epoch %d training ..." % epoch,
              datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        estart = time.time()
        new_max = False
        for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_train:
            loss, tr_acc, probs = m.train(dw, dt, qw, qt, c, a, m_dw, m_qw, tt,
                                          tm, m_c, cl)

            message = "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" % (
                epoch, loss, tr_acc, time.time() - estart)
            print(message)
            logger.write(message + '\n')

            num_iter += 1
            if num_iter % VALIDATION_FREQ == 0:
                total_loss, total_acc, n, n_cand = 0., 0., 0, 0.

                for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_val:
                    outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm,
                                      m_c, cl)
                    loss, acc, probs = outs[:3]

                    bsize = dw.shape[0]
                    total_loss += bsize * loss
                    total_acc += bsize * acc
                    n += bsize
                print('validated on %d validation examples' % n)
                val_acc = total_acc / n
                if val_acc > max_acc:
                    max_acc = val_acc
                    m.save_model('%s/best_model.p' % save_path)
                    new_max = True
                message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                    epoch, total_loss / n, val_acc, max_acc)
                print(message)
                logger.write(message + '\n')

        m.save_model('%s/model_%d.p' % (save_path, epoch))
        message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
            epoch, tr_acc, val_acc)
        print(message)
        logger.write(message + '\n')

        # learning schedule
        if epoch >= 2:
            m.anneal()
        # stopping criterion
        if not new_max:
            break

    logger.close()
Example 8
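A script-level prediction fragment: reads paths and the split from the command line, builds the matching loader, and loads a trained model; the snippet ends before the prediction loop.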
model_path = sys.argv[1]  # assumed: checkpoint path consumed by m.load_model below
output_path = sys.argv[2]
dataset = sys.argv[3]
K = int(sys.argv[4])
top_K = 3

dp = DataPreprocessor.DataPreprocessor()

# NOTE: make sure vocab.txt is already there!
data = dp.preprocess(DATASET, no_training_set=True)
inv_vocab = data.inv_dictionary

print("building minibatch loaders ...")
if 'CANDIDATE_SUBSET' not in locals():
    CANDIDATE_SUBSET = False
if dataset == 'validation':
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(
        data.validation, 128, shuffle=False, candidate_subset=CANDIDATE_SUBSET)
elif dataset == 'test':
    batch_loader_test = MiniBatchLoader.MiniBatchLoader(
        data.test, 128, shuffle=False, candidate_subset=CANDIDATE_SUBSET)

print("building network ...")
m = GAReader.Model(K, data.vocab_size)

print("loading model from file...")
m.load_model(model_path)

print("predicting ...")

fid = open(output_path, 'w')

pr = []
Example 9
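A TensorFlow GA model with train/validation summary writers, a relative-loss stopping criterion, checkpoint bookkeeping in checkpoints.p, and a final evaluation pass.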
def main(save_path, params, mode='train'):

    word2vec = params['word2vec']
    dataset = params['dataset']

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess_rc(params, dataset)

    print("building minibatch loaders ...")
    batch_loader_train = MiniBatchLoader.MiniBatchLoaderMention(
        params, data.training, params['batch_size'])
    batch_loader_val = MiniBatchLoader.MiniBatchLoaderMention(
        params,
        data.validation,
        params['batch_size'],
        shuffle=False,
        ensure_answer=False)
    batch_loader_test = MiniBatchLoader.MiniBatchLoaderMention(
        params,
        data.test,
        params['batch_size'],
        shuffle=False,
        ensure_answer=False)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = GA.Model(params, W_init, embed_dim)

    print("training ...")
    num_iter = 0
    max_acc = 0.0
    min_loss = 1e5

    logger = open(save_path + '/log', 'a')
    train_writer = tf.summary.FileWriter(os.path.join(save_path, 'train'))
    val_writer = tf.summary.FileWriter(os.path.join(save_path, 'val'))

    if params['reload_']:
        print('loading previously saved model')
        saves = pkl.load(open('%s/checkpoints.p' % save_path, 'rb'))
        m.load_model('%s/best_model.p' % save_path, saves[-1])

    # train
    if mode == 'train':
        saves = []
        for epoch in range(params['num_epochs']):
            estart = time.time()
            stop_flag = False

            for example in batch_loader_train:
                loss, tr_acc, probs, summary = m.train(*example[:-2])

                if num_iter % params['logging_frequency'] == 0:
                    message = (
                        "Epoch %d TRAIN loss=%.4e acc=%.4f elapsed=%.1f" %
                        (epoch, loss, tr_acc, time.time() - estart))
                    print(message)
                    logger.write(message + '\n')
                    train_writer.add_summary(summary, num_iter)

                num_iter += 1
                if num_iter % params['validation_frequency'] == 0:
                    total_loss, total_acc, n = 0., 0., 0.

                    for example in batch_loader_val:
                        outs = m.validate(*example[:-2])
                        loss, acc, probs = outs[:3]

                        bsize = example[0].shape[0]
                        total_loss += bsize * loss
                        total_acc += bsize * acc
                        n += bsize

                    val_acc = total_acc / n
                    if val_acc > max_acc:
                        max_acc = val_acc
                        save_id = num_iter
                        print('new best validation accuracy; saving model')
                        sv = m.save_model('%s/best_model.p' % save_path,
                                          save_id)
                        saves.append(save_id)
                        new_max = True

                    val_loss = total_loss / n
                    message = "Epoch %d VAL loss=%.4e acc=%.4f max_acc=%.4f" % (
                        epoch, val_loss, val_acc, max_acc)
                    print(message)
                    logger.write(message + '\n')

                    _add_summary(val_writer, val_loss, "loss", num_iter)
                    _add_summary(val_writer, val_acc, "accuracy", num_iter)

                    # stopping
                    if val_loss < min_loss: min_loss = val_loss
                    if params['stopping_criterion'] and (
                            val_loss - min_loss) / min_loss > 0.3:
                        stop_flag = True
                        break

                if num_iter % params["anneal_frequency"] == 0:
                    m.anneal()

            #m.save_model('%s/model_%d.p'%(save_path,epoch))
            message = "After Epoch %d: Train acc=%.4f, Val acc=%.4f" % (
                epoch, tr_acc, max_acc)
            print(message)
            logger.write(message + '\n')

            if stop_flag: break
        # record all saved models
        pkl.dump(saves, open('%s/checkpoints.p' % save_path, 'wb'))

    # test
    mode = 'test' if mode in ['train', 'test'] else 'val'
    print("testing ...")
    try:
        saves = pkl.load(open('%s/checkpoints.p' % save_path, 'rb'))
        print('%s/checkpoints.p' % save_path)
    except IOError:

        def _to_num(foo):
            try:
                num = int(foo)
            except ValueError:
                return None
            return num

        saves = []
        for directory in os.listdir(save_path):
            if not os.path.isdir(os.path.join(save_path, directory)): continue
            num = _to_num(directory)
            if num is None: continue
            saves.append(num)

        saves = sorted(saves)
    print("saves111111", saves)
    if not saves:
        print("No models saved during training!")
        return
    print('loading model')
    m.load_model('%s/best_model.p' % save_path, saves[-1])

    total_loss, total_acc, n = 0., 0., 0
    answer_structure = {}
    idict = data.inv_dictionary
    eval_loader = batch_loader_test if mode == 'test' else batch_loader_val
    for example in eval_loader:
        outs = m.validate(*example[:-2])
        loss, acc, probs = outs[:3]

        pred_indices = np.argmax(probs, axis=1)
        for i in range(len(example[-1])):
            cname = str(example[-1][i]).strip()
            gt_answer = example[10][i]
            answer_structure[cname] = (pred_indices[i], gt_answer, probs[i, :])

        bsize = example[0].shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        n += bsize
    test_acc = total_acc / n
    test_loss = total_loss / n
    message = "TEST loss=%.4e acc=%.4f" % (test_loss, test_acc)
    print(message)
    logger.write(message + '\n')
    pkl.dump(answer_structure,
             open(os.path.join(save_path, "test_answer_structure.p"), "wb"))

    logger.close()

    # clean up
    print("Cleaning up saved models ...")
Example 10
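Evaluation that additionally maps each question id to its argmax answer and writes the answers, one per line, to raw.txt.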
def main(load_path, params, mode='test'):

    nhidden = params['nhidden']
    dropout = params['dropout']
    word2vec = params['word2vec']
    dataset = params['dataset']
    nlayers = params['nlayers']
    train_emb = params['train_emb']
    char_dim = params['char_dim']
    use_feat = params['use_feat']
    gating_fn = params['gating_fn']

    dp = DataPreprocessor.DataPreprocessor()
    data = dp.preprocess(dataset, no_training_set=True)
    inv_vocab = data.inv_dictionary

    print("building minibatch loaders ...")
    if mode == 'test':
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.test, BATCH_SIZE)
    else:
        batch_loader_test = MiniBatchLoader.MiniBatchLoader(
            data.validation, BATCH_SIZE)

    print("building network ...")
    W_init, embed_dim = Helpers.load_word2vec_embeddings(
        data.dictionary[0], word2vec)
    m = Reader.Model(nlayers,
                     data.vocab_size,
                     data.num_chars,
                     W_init,
                     nhidden,
                     embed_dim,
                     dropout,
                     train_emb,
                     char_dim,
                     use_feat,
                     gating_fn,
                     save_attn=True)
    m.load_model('%s/best_model.p' % load_path)

    print("testing ...")
    pr = np.zeros((len(batch_loader_test.questions),
                   batch_loader_test.max_num_cand)).astype('float32')
    fids, attns = [], []
    total_loss, total_acc, n = 0., 0., 0
    result = {}
    for dw, dt, qw, qt, a, m_dw, m_qw, tt, tm, c, m_c, cl, fnames in batch_loader_test:
        outs = m.validate(dw, dt, qw, qt, c, a, m_dw, m_qw, tt, tm, m_c, cl)
        loss, acc, probs = outs[:3]
        attns += [[fnames[0], probs[0, :]] + [o[0, :, :] for o in outs[3:]]
                  ]  # store one attention

        bsize = dw.shape[0]
        total_loss += bsize * loss
        total_acc += bsize * acc

        pr[n:n + bsize, :] = probs
        fids += fnames
        n += bsize

        answer = probs.argmax(1)
        for it in range(len(fnames)):
            tid = fnames[it].split('/')[-1].split('.')[0].strip()
            result[int(tid)] = answer[it]  # question id -> predicted candidate index
            print(tid, answer[it])

    print(len(result))
    with open('raw.txt', 'w') as ff:
        for i in range(1, 2501):
            ff.write(str(result[i]) + '\n')

    logger = open(load_path + '/log', 'a')
    message = '%s Loss %.4e acc=%.4f' % (mode.upper(), total_loss / n,
                                         total_acc / n)
    print(message)
    logger.write(message + '\n')
    logger.close()

    np.save('%s/%s.probs' % (load_path, mode), np.asarray(pr))
    pkl.dump(attns, open('%s/%s.attns' % (load_path, mode), 'wb'))
    f = open('%s/%s.ids' % (load_path, mode), 'w')
    for item in fids:
        f.write(item + '\n')
    f.close()