Example #1
def main():
    print('Starting at: {}\n'.format(datetime.now()))
    s_time = time.time()
    df = read_df(args.df_path)
    df = df.fillna(u'')

    label_tags = pickle.load(open(args.tags_file, 'rb'))
    print('\nloaded {} tags'.format(len(label_tags)))

    raw_corpus = myio.read_corpus(args.corpus_w_tags, with_tags=True)

    embedding_layer = create_embedding_layer(
        n_d=200,
        embs=load_embedding_iterator(args.embeddings),
        only_words=not args.use_embeddings,
        # only_words=True keeps the vocabulary from the embedding file but initializes the vectors randomly
        trainable=args.trainable
    )

    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer, label_tags, max_len=args.max_seq_len)

    print("vocab size={}, corpus size={}\n".format(embedding_layer.n_V, len(raw_corpus)))

    padding_id = embedding_layer.vocab_map["<padding>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus_w_tags, embedding_layer, with_tags=True)

    # pick the model class that implements the requested --layer type
    if args.layer.lower() == "lstm":
        from models import LstmMultiTagsClassifier as Model
    elif args.layer.lower() in ["bilstm", "bigru"]:
        from models import BiRNNMultiTagsClassifier as Model
    elif args.layer.lower() == "cnn":
        from models import CnnMultiTagsClassifier as Model
    elif args.layer.lower() == "gru":
        from models import GruMultiTagsClassifier as Model
    else:
        raise ValueError("unsupported layer type: {}".format(args.layer))

    if args.cross_val:
        train, dev, test = myio.create_cross_val_batches(df, ids_corpus, args.batch_size, padding_id)
    else:
        dev = list(myio.create_batches(
            df, ids_corpus, 'dev', args.batch_size, padding_id, N_neg=args.n_neg, samples_file=args.samples_file))
        test = list(myio.create_batches(
            df, ids_corpus, 'test', args.batch_size, padding_id, N_neg=args.n_neg, samples_file=args.samples_file))
    # baselines_eval(train, dev, test)

    model = Model(args, embedding_layer, len(label_tags), weights=weights if args.reweight else None)
    model.ready()

    print('total (non-)trainable params:', model.num_parameters())

    if args.load_pre_trained_part:
        # need to remove the old assigns to embeddings
        model.init_assign_ops = model.load_pre_trained_part(args.load_pre_trained_part)
    print('\nmodel init_assign_ops: {}\n'.format(model.init_assign_ops))

    model.train_model(df, ids_corpus, dev=dev, test=test)
    print('\nEnded at: {}'.format(datetime.now()))
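The if/elif chain above dispatches on args.layer by hand. As a minimal sketch, the same selection can be table-driven; this assumes only the model classes already imported above plus a hypothetical pick_model_class helper:

# Hypothetical table-driven alternative to the if/elif dispatch above;
# "bilstm" and "bigru" share the bidirectional class, as in the original.
from models import (LstmMultiTagsClassifier, BiRNNMultiTagsClassifier,
                    CnnMultiTagsClassifier, GruMultiTagsClassifier)

LAYER_TO_MODEL = {
    "lstm": LstmMultiTagsClassifier,
    "bilstm": BiRNNMultiTagsClassifier,
    "bigru": BiRNNMultiTagsClassifier,
    "cnn": CnnMultiTagsClassifier,
    "gru": GruMultiTagsClassifier,
}

def pick_model_class(layer_name):
    try:
        return LAYER_TO_MODEL[layer_name.lower()]
    except KeyError:
        raise ValueError("unsupported layer type: {}".format(layer_name))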
Example #2
def main(args):
    raw_corpus = myio.read_corpus(args.corpus)
    embedding_layer = myio.create_embedding_layer(
        raw_corpus,
        n_d=args.hidden_dim,
        embs=load_embedding_iterator(args.embeddings)
        if args.embeddings else None)
    ids_corpus = myio.map_corpus(raw_corpus,
                                 embedding_layer,
                                 max_len=args.max_seq_len)
    say("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                 len(raw_corpus)))
    padding_id = embedding_layer.vocab_map["<padding>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.dev:
        dev_raw = myio.read_annotations(args.dev, K_neg=-1, prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus,
                                       dev_raw,
                                       padding_id,
                                       pad_left=not args.average,
                                       merge=args.merge)
    if args.test:
        test_raw = myio.read_annotations(args.test, K_neg=-1, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus,
                                        test_raw,
                                        padding_id,
                                        pad_left=not args.average,
                                        merge=args.merge)

    if args.train:
        start_time = time.time()
        train = myio.read_annotations(args.train)
        train_batches = myio.create_batches(ids_corpus,
                                            train,
                                            args.batch_size,
                                            padding_id,
                                            pad_left=not args.average,
                                            merge=args.merge)
        say("{} to create batches\n".format(time.time() - start_time))
        say("{} batches, {} tokens in total, {} triples in total\n".format(
            len(train_batches), sum(len(x[0].ravel()) for x in train_batches),
            sum(len(x[1].ravel()) for x in train_batches)))
        train_batches = None  # only built for the statistics above; drop the reference

        model = Model(args,
                      embedding_layer,
                      weights=weights if args.reweight else None)
        model.ready()

        # set parameters using pre-trained network
        if args.load_pretrain:
            model.encoder.load_pretrained_parameters(args)

        model.train(ids_corpus, train, (dev, dev_raw) if args.dev else None,
                    (test, test_raw) if args.test else None)
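Several of these examples build term weights with myio.create_idf_weights when args.reweight is set. Its internals are not shown here; the following is only a rough sketch of plain inverse-document-frequency weighting over a tokenized corpus, with a hypothetical idf_weights helper:

import math
from collections import Counter

def idf_weights(tokenized_docs):
    """Inverse document frequency per token: rarer terms get larger weights.
    tokenized_docs is an iterable of token lists."""
    n_docs = 0
    doc_freq = Counter()
    for tokens in tokenized_docs:
        n_docs += 1
        doc_freq.update(set(tokens))  # count each token once per document
    return {tok: math.log(float(n_docs) / df) for tok, df in doc_freq.items()}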
Example #3
def main(args):
    raw_corpus = myio.read_corpus(args.corpus)
    embedding_layer = myio.create_embedding_layer(
        raw_corpus,
        n_d=args.hidden_dim,
        cut_off=args.cut_off,
        embs=load_embedding_iterator(args.embeddings) if args.embeddings else None
    )
    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer)
    say("vocab size={}, corpus size={}\n".format(
            embedding_layer.n_V,
            len(raw_corpus)
        ))
    padding_id = embedding_layer.vocab_map["<padding>"]
    bos_id = embedding_layer.vocab_map["<s>"]
    eos_id = embedding_layer.vocab_map["</s>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.dev:
        dev = myio.read_annotations(args.dev, K_neg=20, prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus, dev, padding_id)
    if args.test:
        test = myio.read_annotations(args.test, K_neg=20, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus, test, padding_id)

    # hold out the listed question ids for evaluation; the rest remains available for training
    if args.heldout:
        with open(args.heldout) as fin:
            heldout_ids = fin.read().split()
        heldout_corpus = dict((id, ids_corpus[id]) for id in heldout_ids if id in ids_corpus)
        train_corpus = dict((id, ids_corpus[id]) for id in ids_corpus
                                                if id not in heldout_corpus)
        heldout = myio.create_batches(heldout_corpus, [ ], args.batch_size,
                    padding_id, bos_id, eos_id, auto_encode=True)
        heldout = [myio.create_one_batch(b1, t2, padding_id) for t1, b1, t2 in heldout]
        say("heldout examples={}\n".format(len(heldout_corpus)))

    if args.train:
        model = Model(args, embedding_layer,
                      weights=weights if args.reweight else None)

        start_time = time.time()
        train = myio.read_annotations(args.train)
        if not args.use_anno:
            train = []
        train_batches = myio.create_batches(ids_corpus, train, args.batch_size,
                    model.padding_id, model.bos_id, model.eos_id, auto_encode=True)
        say("{} to create batches\n".format(time.time()-start_time))

        model.ready()
        model.train(
                ids_corpus if not args.heldout else train_corpus,
                train,
                dev if args.dev else None,
                test if args.test else None,
                heldout if args.heldout else None
            )
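The heldout split above builds both dictionaries with dict() over generator expressions. An equivalent sketch using dict comprehensions and a set for O(1) membership tests (same behavior, assuming ids_corpus maps a question id to its encoded form):

with open(args.heldout) as fin:
    heldout_ids = set(fin.read().split())

heldout_corpus = {qid: q for qid, q in ids_corpus.items() if qid in heldout_ids}
train_corpus = {qid: q for qid, q in ids_corpus.items() if qid not in heldout_ids}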
Example #4
def main(args):
    raw_corpus = myio.read_corpus(args.corpus)
    print("raw corpus:", args.corpus, "len:", len(raw_corpus))
    embedding_layer = myio.create_embedding_layer(
        raw_corpus,
        n_d=args.hidden_dim,
        cut_off=args.cut_off,
        embs=None  # embs=load_embedding_iterator(args.embeddings) if args.embeddings else None
    )
    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer, max_len=args.max_seq_len)
    myio.say("vocab size={}, corpus size={}\n".format(
            embedding_layer.n_V,
            len(raw_corpus)
        ))
    padding_id = embedding_layer.vocab_map["<padding>"]
 
    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

# 
#     if args.dev:
#         dev = myio.read_annotations(args.dev, K_neg=-1, prune_pos_cnt=-1)
#         dev = myio.create_eval_batches(ids_corpus, dev, padding_id, pad_left = not args.average)
#     if args.test:
#         test = myio.read_annotations(args.test, K_neg=-1, prune_pos_cnt=-1)
#         test = myio.create_eval_batches(ids_corpus, test, padding_id, pad_left = not args.average)
 
    if args.train:
        start_time = time.time()
        train = myio.read_annotations(args.train)
        print("training data:", args.train, "len:", len(train))
        train_batches = myio.create_batches(ids_corpus, train, args.batch_size,
                                padding_id, pad_left=not args.average)
        myio.say("{:.2f} secs to create {} batches of size {}\n".format(
                time.time() - start_time, len(train_batches), args.batch_size))
        myio.say("{} batches, {} tokens in total, {} triples in total\n".format(
                len(train_batches),
                sum(len(x[0].ravel())+len(x[1].ravel()) for x in train_batches),
                sum(len(x[2].ravel()) for x in train_batches)
            ))
#         train_batches = None
 
        model = Model(args, embedding_layer,
                      weights=weights if args.reweight else None)
        model.ready()
 
#         # set parameters using pre-trained network
#         if args.load_pretrain:
#             model.load_pretrained_parameters(args)
# 
        model.train(
                ids_corpus,
                train,
                dev = None, # dev if args.dev else None,
                test = None # test if args.test else None
            )
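The batching calls in these examples pad with the <padding> token id and pick the side via pad_left=not args.average. The real work happens inside the project's myio.create_batches; the following pad_batch helper is only a hypothetical sketch of the padding step itself:

import numpy as np

def pad_batch(seqs, padding_id, pad_left=True):
    """Pad a list of token-id sequences to the length of the longest one."""
    max_len = max(len(s) for s in seqs)
    rows = []
    for s in seqs:
        pad = [padding_id] * (max_len - len(s))
        rows.append(pad + list(s) if pad_left else list(s) + pad)
    return np.array(rows, dtype="int32")

Left padding keeps the last real token in a fixed position, which suits models that read the final hidden state; right padding is the natural fit when averaging over positions, which is presumably why pad_left is tied to not args.average here.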
Example #5
def main(args):
    raw_corpus = myio.read_corpus(args.corpus)
    embedding_layer = myio.create_embedding_layer(
        raw_corpus,
        n_d=args.hidden_dim,
        embs=load_embedding_iterator(args.embeddings) if args.embeddings else None
    )
    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer, max_len=args.max_seq_len)
    say("vocab size={}, corpus size={}\n".format(
            embedding_layer.n_V,
            len(raw_corpus)
        ))
    padding_id = embedding_layer.vocab_map["<padding>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.dev:
        dev_raw = myio.read_annotations(args.dev, K_neg=-1, prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus, dev_raw, padding_id,
                    pad_left=not args.average, merge=args.merge)
    if args.test:
        test_raw = myio.read_annotations(args.test, K_neg=-1, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus, test_raw, padding_id,
                    pad_left=not args.average, merge=args.merge)

    if args.train:
        start_time = time.time()
        train = myio.read_annotations(args.train)
        train_batches = myio.create_batches(ids_corpus, train, args.batch_size,
                                padding_id, pad_left=not args.average, merge=args.merge)
        say("{} to create batches\n".format(time.time()-start_time))
        say("{} batches, {} tokens in total, {} triples in total\n".format(
                len(train_batches),
                sum(len(x[0].ravel()) for x in train_batches),
                sum(len(x[1].ravel()) for x in train_batches)
            ))
        train_batches = None  # only built for the statistics above; drop the reference

        model = Model(args, embedding_layer,
                      weights=weights if args.reweight else None)
        model.ready()

        # set parameters using pre-trained network
        if args.load_pretrain:
            model.encoder.load_pretrained_parameters(args)

        model.train(
                ids_corpus,
                train,
                (dev, dev_raw) if args.dev else None,
                (test, test_raw) if args.test else None
            )
Example #6
def main(args):
    raw_corpus = myio.read_corpus(args.corpus)
    embedding_layer = myio.create_embedding_layer(
        raw_corpus,
        n_d=args.hidden_dim,
        cut_off=args.cut_off,
        embs=load_embedding_iterator(args.embeddings)
        if args.embeddings else None)
    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer)
    say("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                 len(raw_corpus)))
    padding_id = embedding_layer.vocab_map["<padding>"]
    bos_id = embedding_layer.vocab_map["<s>"]
    eos_id = embedding_layer.vocab_map["</s>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.dev:
        dev = myio.read_annotations(args.dev, K_neg=20, prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus, dev, padding_id)
    if args.test:
        test = myio.read_annotations(args.test, K_neg=20, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus, test, padding_id)

    if args.heldout:
        with open(args.heldout) as fin:
            heldout_ids = fin.read().split()
        heldout_corpus = dict(
            (id, ids_corpus[id]) for id in heldout_ids if id in ids_corpus)
        train_corpus = dict((id, ids_corpus[id]) for id in ids_corpus
                            if id not in heldout_corpus)
        heldout = myio.create_batches(heldout_corpus, [],
                                      args.batch_size,
                                      padding_id,
                                      bos_id,
                                      eos_id,
                                      auto_encode=True)
        heldout = [
            myio.create_one_batch(b1, t2, padding_id) for t1, b1, t2 in heldout
        ]
        say("heldout examples={}\n".format(len(heldout_corpus)))

    if args.train:
        model = Model(args,
                      embedding_layer,
                      weights=weights if args.reweight else None)

        start_time = time.time()
        train = myio.read_annotations(args.train)
        if not args.use_anno:
            train = []
        train_batches = myio.create_batches(ids_corpus,
                                            train,
                                            args.batch_size,
                                            model.padding_id,
                                            model.bos_id,
                                            model.eos_id,
                                            auto_encode=True)
        say("{} to create batches\n".format(time.time() - start_time))
        model.ready()

        model.train(ids_corpus if not args.heldout else train_corpus, train,
                    dev if args.dev else None, test if args.test else None,
                    heldout if args.heldout else None)
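Examples #3 and #6 additionally look up <s> and </s> ids and build batches with auto_encode=True. A hedged sketch of the sequence framing such a setup typically needs (hypothetical frame_sequence helper; the project's myio.create_batches handles this internally):

def frame_sequence(token_ids, bos_id, eos_id):
    """Wrap a token-id sequence with begin- and end-of-sequence markers,
    as auto-encoding decoder targets typically require."""
    return [bos_id] + list(token_ids) + [eos_id]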
Example #7
def main():
    print('Starting at: {}\n'.format(datetime.now()))
    raw_corpus = myio.read_corpus(args.corpus)
    embedding_layer = create_embedding_layer(
        n_d=200,
        embs=load_embedding_iterator(args.embeddings),
        only_words=not args.use_embeddings,
        trainable=args.trainable
    )
    ids_corpus = myio.map_corpus(raw_corpus, embedding_layer, max_len=args.max_seq_len)
    print("vocab size={}, corpus size={}\n".format(
            embedding_layer.n_V,
            len(raw_corpus)
        ))
    padding_id = embedding_layer.vocab_map["<padding>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.layer.lower() == "lstm":
        from models import LstmQR as Model
    elif args.layer.lower() in ["bilstm", "bigru"]:
        from models import BiRNNQR as Model
    elif args.layer.lower() == "cnn":
        from models import CnnQR as Model
    elif args.layer.lower() == "gru":
        from models import GruQR as Model
    else:
        raise ValueError("unsupported layer type: {}".format(args.layer))

    if args.dev:
        dev = myio.read_annotations(args.dev, K_neg=-1, prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus, dev, padding_id, pad_left=False)
    if args.test:
        test = myio.read_annotations(args.test, K_neg=-1, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus, test, padding_id, pad_left=False)

    model = Model(args, embedding_layer, weights=weights if args.reweight else None)
    model.ready()

    print('total (non-)trainable params:', model.num_parameters())

    if args.load_pre_trained_part:
        # need to remove the old assigns to embeddings
        model.init_assign_ops = model.load_pre_trained_part(args.load_pre_trained_part)
    print('\nmodel init_assign_ops: {}\n'.format(model.init_assign_ops))

    if args.train:
        start_time = time.time()
        train = myio.read_annotations(args.train)
        train_batches = myio.create_batches(
            ids_corpus, train, args.batch_size, padding_id, pad_left=False
        )

        print("{} to create batches\n".format(time.time()-start_time))
        print("{} batches, {} tokens in total, {} triples in total\n".format(
                len(train_batches),
                sum(len(x[0].ravel())+len(x[1].ravel()) for x in train_batches),
                sum(len(x[2].ravel()) for x in train_batches)
            ))

        model.train_model(
            ids_corpus,
            train,
            dev=dev if args.dev else None,
            test=test if args.test else None
        )
    print('\nEnded at: {}'.format(datetime.now()))
Example #8
def main(args):
    raw_corpus = myio.read_corpus(args.corpus, args.translations or None,
                                  args.translatable_ids or None,
                                  args.generated_questions_train or None)

    generated_questions_eval = myio.read_generated_questions(
        args.generated_questions)

    embedding_layer = None
    if args.trainable_embeddings == 1:
        embedding_layer = myio.create_embedding_layer(
            raw_corpus,
            n_d=args.hidden_dim,
            cut_off=args.cut_off,
            embs=load_embedding_iterator(args.embeddings)
            if args.embeddings else None,
            fix_init_embs=False)
    else:
        embedding_layer = myio.create_embedding_layer(
            raw_corpus,
            n_d=args.hidden_dim,
            cut_off=args.cut_off,
            embs=load_embedding_iterator(args.embeddings)
            if args.embeddings else None)
    ids_corpus = myio.map_corpus(raw_corpus,
                                 embedding_layer,
                                 max_len=args.max_seq_len,
                                 generated_questions=generated_questions_eval)
    say("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                 len(raw_corpus)))
    padding_id = embedding_layer.vocab_map["<padding>"]

    if args.reweight:
        weights = myio.create_idf_weights(args.corpus, embedding_layer)

    if args.dev:
        # dev = myio.read_annotations(args.dev, K_neg=-1, prune_pos_cnt=-1)
        dev = myio.read_annotations(args.dev,
                                    K_neg=args.dev_pool_size,
                                    prune_pos_cnt=-1)
        dev = myio.create_eval_batches(ids_corpus,
                                       dev,
                                       padding_id,
                                       pad_left=not args.average)
    if args.test:
        test = myio.read_annotations(args.test, K_neg=-1, prune_pos_cnt=-1)
        test = myio.create_eval_batches(ids_corpus,
                                        test,
                                        padding_id,
                                        pad_left=not args.average)

    if args.train:
        start_time = time.time()
        train = myio.read_annotations(
            args.train, training_data_percent=args.training_data_percent)
        train_batches = myio.create_batches(ids_corpus,
                                            train,
                                            args.batch_size,
                                            padding_id,
                                            pad_left=not args.average,
                                            include_generated_questions=True)
        say("{} to create batches\n".format(time.time() - start_time))
        say("{} batches, {} tokens in total, {} triples in total\n".format(
            len(train_batches),
            sum(len(x[0].ravel()) + len(x[1].ravel()) for x in train_batches),
            sum(len(x[2].ravel()) for x in train_batches)))
        train_batches = None

        model = Model(args,
                      embedding_layer,
                      weights=weights if args.reweight else None)
        # print('args.average: '+args.average)
        model.ready()

        # set parameters using pre-trained network
        if args.do_train == 1:
            if args.load_pretrain:
                model.load_pretrained_parameters(args)

            model.train(ids_corpus, train, dev if args.dev else None,
                        test if args.test else None)

        # average the predictions obtained by running the model several times (range(1) = a single run as written)
        if args.do_evaluate == 1:
            model.load_pretrained_parameters(args)
            # model.set_model(model.load_model(args.load_pretrain))
            for i in range(1):
                r = model.just_eval(dev if args.dev else None,
                                    test if args.test else None)

        # ANALYZE the results
        if len(args.analyze_file.strip()) > 0:
            model.load_pretrained_parameters(args)
            file_name = args.analyze_file.strip()  # e.g. 'AskUbuntu.Rcnn_analysis3.gt(es)-gt.txt'
            model.analyze(file_name, embedding_layer, dev)
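The do_evaluate branch is set up to average predictions over repeated evaluation runs, although the loop above performs a single run as written. A minimal sketch of averaging per-candidate scores across runs, with hypothetical inputs:

import numpy as np

def average_runs(score_runs):
    """score_runs: one score array per run, all the same shape.
    Returns the element-wise mean across runs."""
    return np.mean(np.stack(score_runs, axis=0), axis=0)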
Example #9
    raw_corpus = myio.read_corpus(args.corpus_w_tags, with_tags=True)

    embedding_layer = create_embedding_layer(n_d=10,
                                             embs=load_embedding_iterator(
                                                 args.embeddings),
                                             only_words=False)

    with tf.Session() as sess:

        myqrapi = TPAPI(args.model, embedding_layer, sess, len(label_tags),
                        args.layer)

        embedding_layer = myqrapi.model.embedding_layer

        ids_corpus = myio.map_corpus(raw_corpus,
                                     embedding_layer,
                                     label_tags,
                                     max_len=args.max_seq_len)

        print("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                       len(raw_corpus)))

        padding_id = embedding_layer.vocab_map["<padding>"]

        say("vocab size={}, corpus size={}\n".format(embedding_layer.n_V,
                                                     len(raw_corpus)))

        eval_batches = create_batches(df, ids_corpus, 'dev',
                                      myqrapi.model.args.batch_size,
                                      padding_id)
        print('DEV evaluation:')
        print('{} batches.'.format(len(eval_batches)))
Example #10
    argparser.add_argument("--full_results_file", type=str,
                           default="")  # to write in
    argparser.add_argument("--results_file", type=str,
                           default="")  # to write in
    argparser.add_argument("--layer", type=str, default="lstm")
    args = argparser.parse_args()
    print('\n', args, '\n')

    with tf.Session() as sess:

        myqrapi = QRAPI(args.model, args.corpus, args.embeddings, sess,
                        args.layer)

        raw_corpus = myio.read_corpus(args.corpus)
        embedding_layer = myqrapi.model.embedding_layer
        ids_corpus = myio.map_corpus(raw_corpus, embedding_layer, max_len=100)
        test = myio.read_annotations(args.test_file,
                                     K_neg=-1,
                                     prune_pos_cnt=-1)
        test = create_eval_batches(ids_corpus,
                                   test,
                                   myqrapi.model.padding_id,
                                   pad_left=not myqrapi.model.args.average)

        testmap, testmrr, testpat1, testpat5, rank_labels, rank_ids, qids, rank_scores = myqrapi.evaluate(
            test, sess)

        if args.full_results_file:
            with open(args.full_results_file, 'w') as f:
                for i, (_, _, labels, pid, qids) in enumerate(test):
                    print_qids_similar = [