Example #1
def main(_):

    data_dir = cfg.DATA_DIR
    vocab, rev_vocab = initialize_vocab(FLAGS.vocab)

    # gpu setting
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    tf.reset_default_graph()

    encoder = Encoder(size=2 * cfg.lstm_num_hidden)
    decoder = Decoder(output_size=2 * cfg.lstm_num_hidden)
    qa = QASystem(encoder, decoder, FLAGS.embed)

    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        load_train_dir = get_normalized_train_dir(FLAGS.ckpt)
        initialize_model(sess, qa, load_train_dir)
        print(
            '*********************************************************************'
        )
        print(
            "Welcome! You can use this to explore the behavior of the model.")
        print(
            '*********************************************************************'
        )

        while True:
            print('-------------------')
            print('Input the context: ')
            print('-------------------')
            sentence = raw_input()
            print('-------------------')
            print('Input the question: ')
            print('-------------------')
            query = raw_input()
            raw_context = nltk.word_tokenize(sentence)
            context = sentence_to_token_ids(sentence,
                                            vocab,
                                            tokenizer=nltk.word_tokenize)
            question = sentence_to_token_ids(query,
                                             vocab,
                                             tokenizer=nltk.word_tokenize)
            context_in = mask_input(context, cfg.context_max_len)
            question_in = mask_input(question, cfg.question_max_len)
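            # qa.answer returns start/end token indices into the context; the answer span is read back from the raw tokens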
            start, end = qa.answer(sess, [context_in], [question_in])
            answer = ' '.join(raw_context[start[0]:end[0] + 1])
            print('==========================================')
            print('ANSWER: {}'.format(answer))
            print('==========================================')
Example #2
def main():
    """
    Entry point for training

    Load dataset according to args and train model
    """
    args = Argparser().args
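    # Let cuDNN benchmark and pick the fastest convolution algorithms for fixed-size inputs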
    torch.backends.cudnn.benchmark = True

    data_path = f'./{args.input_dir}/{args.data_dir}/'
    dataset = ShapeNetDataset(data_path)
    data_loader = DataLoader(dataset=dataset,
                             batch_size=args.batch_size,
                             num_workers=torch.cuda.device_count() *
                             4 if args.device.upper() == 'CUDA' else 4,
                             shuffle=True,
                             drop_last=True)
    d_path = f'./{args.models_path}/{args.obj}_d.tar'
    g_path = f'./{args.models_path}/{args.obj}_g.tar'
    d_model, g_model, d_optim, g_optim = initialize_model(args, d_path, g_path)

    # Always save the model if something goes wrong (crash, disconnect, etc.)
    try:
        gan = '' if args.unpac else 'Pac'
        two = '' if args.unpac else '2'
        print(
            f'Training {gan}{args.gan_type.upper()}{two} on {args.device.upper()}'
        )
        training_loop(data_loader, d_model, g_model, d_optim, g_optim, args)
    finally:
        save_model(args.models_path, d_path, g_path, d_model, g_model, d_optim,
                   g_optim, args)
Example #3
def main(model_path, tests):
    """Run and print tests for given `test`-list on given model"""
    adj_model = data.build_adj_model()
    gold_standard = data.load_gold_standard(adj_model)
    model, _optimizer = train.initialize_model(model_path)
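    # Run all predictions with autograd disabled (inference only)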
    with torch.set_grad_enabled(False):
        for test in tests:
            if not adj_model.has_adj(test):
                print("No embedding for '%s'. Skipping" % test)
                continue

            header = "Antonym predictions for '%s'" % test
            header += ":" if test in gold_standard else " (Not in gold standard):"
            print(header)

            gold_antonyms = gold_standard[
                test] if test in gold_standard else []
            ant_pred = predict_antonym_emb(model, adj_model, test)
            predictions = [
                a.name for a in adj_model.adjs_from_vector(ant_pred, count=5)
            ]
            output = []
            for prediction in predictions:
                if prediction in gold_antonyms:
                    output.append("[%s]" % prediction)
                else:
                    output.append("%s" % prediction)
            print("\t" + ", ".join(output))
Example #4
def main():
    dataload = DataLoader()
    vocab, rev_vocab = initialize_vocab(pjoin(cfg.DATA_DIR, cfg.vocab_file))
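    # device_count={'GPU': 0} hides all GPUs so TensorFlow runs on the CPU only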
    config = tf.ConfigProto(device_count={'GPU': 0})
    #config.gpu_options.allow_growth = True

    tf.reset_default_graph()

    encoder = Encoder(size=2 * cfg.lstm_num_hidden)
    decoder = Decoder(output_size=2 * cfg.lstm_num_hidden)
    qa = QASystem(encoder, decoder, cfg.embed_dir)

    c1 = open(pjoin(cfg.DATA_DIR, 'test.context'), 'r').read().split('\n')
    q1 = open(pjoin(cfg.DATA_DIR, 'test.question'), 'r').read().split('\n')
    a1 = open(pjoin(cfg.DATA_DIR, 'test.answer'), 'r').read().split('\n')

    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        load_train_dir = pjoin(cfg.output, cfg.train_dir)
        initialize_model(sess, qa, load_train_dir)
        ans = []
        f1 = []
        for i, data in enumerate(c1):
            print(i)
            sentence = c1[i]
            query = q1[i]
            raw_context = nltk.word_tokenize(sentence)
            len(raw_context)
            context = dataload.sentence_to_token_ids(
                sentence, vocab, tokenizer=nltk.word_tokenize)
            question = dataload.sentence_to_token_ids(
                query, vocab, tokenizer=nltk.word_tokenize)
            context_in = mask_input(context, cfg.context_max_len)
            question_in = mask_input(question, cfg.question_max_len)
            start, end = qa.answer(sess, [context_in], [question_in],
                                   train=False)
            answer = ' '.join(raw_context[start[0]:(end[0] + 1)])
            f1.append(qa.f1_score(answer, a1[i]))
            print("QUESTION: " + query)
            print("ANSWER: " + answer)
            if i == 100:
                break
            ans.append(answer)
    return ans, f1
Example #5
def main(_):

    data_dir = cfg.DATA_DIR
    vocab, rev_vocab = initialize_vocab(FLAGS.vocab)

    # gpu setting
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    tf.reset_default_graph()

    encoder = Encoder(size=2 * cfg.lstm_num_hidden)
    decoder = Decoder(output_size=2 * cfg.lstm_num_hidden)
    qa = QASystem(encoder, decoder, FLAGS.embed)

    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        load_train_dir = get_normalized_train_dir(FLAGS.ckpt)
        initialize_model(sess, qa, load_train_dir)
        print('*********************************************************************')
        print("Welcome! You can use this to explore the behavior of the model.")
        print('*********************************************************************')

        while True:
            print('-------------------')
            print('Input the context: ')
            print('-------------------')
            sentence = raw_input()
            print('-------------------')
            print('Input the question: ')
            print('-------------------')
            query = raw_input()
            raw_context = nltk.word_tokenize(sentence)
            context = sentence_to_token_ids(sentence, vocab, tokenizer=nltk.word_tokenize)
            question = sentence_to_token_ids(query, vocab, tokenizer=nltk.word_tokenize)
            context_in = mask_input(context, cfg.context_max_len)
            question_in = mask_input(question, cfg.question_max_len)
            start, end = qa.answer(sess, [context_in], [question_in])
            answer = ' '.join(raw_context[start[0]: end[0] + 1])
            print('==========================================')
            print('ANSWER: {}'.format(answer))
            print('==========================================')
Example #6
def resume_training(path_results, dataset_name, model_num):
	
	train.log('RESUMING TRAINING MODEL {}'.format(model_num))
	
	# %%
	
	print('RESUMING TRAINING MODEL {}'.format(model_num))
	model_folder = train.train_utils.get_model_path(path_results, dataset_name, model_num)
	train_params = json.load(open(model_folder + 'train_params.json', 'r'))
	print('*** Train params', train_params)
	train.log('TRAIN PARAMS: {}'.format(train_params))
	
	lines_train, lines_val, _ = train.load_data_and_initialize_training(resume_training = True, **train_params)
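	# Resume from the most recent intermediate weights file; the last epoch is parsed from the 'ep...' part of its name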
	
	last_weights = sorted([ f for f in os.listdir(model_folder + 'weights/') if 'trained' not in f ])[-1]
	last_epoch = int([ s for s in last_weights.split('-') if s.startswith('ep') ][0][2:])
	path_weigths = model_folder + 'weights/' + last_weights
	freeze_body = train_params['freeze_body'] if last_epoch < train_params['frozen_epochs'] else 0
	
	
	init_lr = get_init_lr(model_folder, last_epoch)
#	init_lr = 1e-8
	
	print(' *  Restoring training in epoch {} with lr {}'.format(last_epoch, init_lr))
	print(' *  weights:', path_weigths)
	train.log('RESUMING ON EPOCH {} WITH LR {:.3f}'.format(last_epoch, init_lr))

	
	model, callbacks, anchors, num_classes, class_names = train.initialize_model(train_params['path_classes'], 
					  train_params['path_anchors'], train_params['path_model'], 
					  train_params['input_shape'], freeze_body, 
					  path_weigths, train_params['path_annotations'], train_params['eval_val_score'], 
					  train_params['td_len'], train_params['mode'], 
					  train_params['spp'], train_params['loss_percs'],
					  mAP_lr=train_params['mAP_lr'])
#	model, callbacks, anchors, num_classes, class_names = train.initialize_model(**train_params)

	if train_params['mAP_metric'] or train_params['mAP_lr']:
		callbacks['mAP_callback'] = LambdaCallback(on_epoch_end=train.mAP_callback(model, train_params, class_names))

	init_epoch = last_epoch
	
	if init_epoch < train_params['frozen_epochs']:
		train.train_frozen_stage(model, callbacks, lines_train, lines_val, anchors, num_classes, 
								   init_epoch=init_epoch, init_lr=init_lr, **train_params)
	train.train_final_stage(model, callbacks, lines_train, lines_val, anchors, num_classes, 
								   init_epoch=init_epoch, init_lr=init_lr, **train_params)
	
#	train_utils.remove_worst_weights(train_params['path_model'])
	
	train.evaluate_training(train_params, best_weights=None, 
				   score_train=train_params['eval_train_score'], 
				   score_val=train_params['eval_val_score'])

	return True
Example #7
def main(model_path, device=DEVICE):
    """
    Loads the adj_model and EncoderDecoder model from a checkpoint.
    The EncoderDecoder model is then evaluated on both the GRE question
    answer set and the GRE/LB P1/P5 gold-standard antonym prediction task.
    """
    print("Building dataset and adjectives")
    adj_model = data.build_adj_model()
    model, _optimizer = train.initialize_model(model_path, device)
    model.eval()
    evaluate(model, adj_model, device)
Example #8
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    load_config(args)

    emb_type = 'Word2VecWiki'

    dl = DatasetLoading(emb_type,
                        args.emb_path,
                        exo1_word='僕',
                        exo2_word='おまえ',
                        exoX_word='これ')
    dl.making_intra_df()

    trains_dict, _, tests_dict = dl.split_each_domain('intra')

    if args.model == 'CPS' or args.model == 'MIX':
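        # the CPS and MIX variants need per-case-type statistics computed from the training split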
        statistics_of_each_case_type = train.init_statistics_of_each_case_type(
            trains_dict, args.case, args.media)
    else:
        statistics_of_each_case_type = None

    bilstm = train.initialize_model(
        args.gpu,
        vocab_size=len(dl.wv.index2word),
        v_vec=dl.wv.vectors,
        dropout_ratio=0.2,
        n_layers=3,
        model=args.model,
        statistics_of_each_case_type=statistics_of_each_case_type)

    pprint(args.__dict__)
    val_results = max_f1_epochs_of_vals(args.load_dir)
    results = {}
    logs = {}
    domain = 'All'
    epoch = val_results[domain]['epoch']
    load_model(epoch, bilstm, args.load_dir, args.gpu)
    _results, _ = run(tests_dict, bilstm, 1, args)
    results[domain] = _results[domain]
    results[domain]['epoch'] = epoch
    for domain in tests_dict.keys():
        epoch = val_results[domain]['epoch']
        load_model(epoch, bilstm, args.load_dir, args.gpu)
        _results, _logs = run(tests_dict, bilstm, 1, args)
        results[domain] = _results[domain]
        results[domain]['epoch'] = epoch
        logs[domain] = _logs[domain]
    dump_dict(results, args.load_dir, 'test_logs')
    dump_predict_logs(logs, args.load_dir)
Example #9
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    load_config(args)

    emb_type = 'Word2VecWiki'

    dl = DatasetLoading(emb_type,
                        args.emb_path,
                        exo1_word='僕',
                        exo2_word='おまえ',
                        exoX_word='これ')
    dl.making_intra_df()

    trains_dict, vals_dict, _ = dl.split_each_domain('intra')

    if args.model == 'MIX':
        statistics_of_each_case_type = train.init_statistics_of_each_case_type(
            trains_dict, args.case, args.media)
    else:
        statistics_of_each_case_type = None

    bilstm = train.initialize_model(
        args.gpu,
        vocab_size=len(dl.wv.index2word),
        v_vec=dl.wv.vectors,
        dropout_ratio=0.2,
        n_layers=3,
        model=args.model,
        statistics_of_each_case_type=statistics_of_each_case_type)

    pprint(args.__dict__)
    val_results = test.max_f1_epochs_of_vals(args.load_dir)

    for domain in ['OC', 'OY', 'OW', 'PB', 'PM', 'PN']:
        print(f'--- start {domain} fine tuning ---')
        dump_dict(args.__dict__, args.dump_dir + f'/{domain}/{args.case}',
                  'args')
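        # restore the epoch with the best validation F1 and fine-tune it on this domain only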
        epoch = val_results[domain]['epoch']
        load_model(epoch, bilstm, args.load_dir, args.gpu)

        # maybe lr = 0.0001 is fine here
        run(trains_dict[domain],
            vals_dict,
            bilstm,
            args,
            ft_domain=domain,
            lr=0.0001,
            batch_size=64)
Example #10
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    load_config(args)

    dl = DatasetLoading(args.emb_type,
                        args.emb_path,
                        media=args.media,
                        exo1_word=args.exo1_word,
                        exo2_word=args.exo2_word,
                        exoX_word=args.exoX_word)
    if args.dataset_type == 'intra':
        dl.making_intra_df()
    elif args.dataset_type == 'inter':
        dl.making_inter_df()
    else:
        raise ValueError()

    _, _, tests = dl.split(args.dataset_type)

    bilstm = train.initialize_model(args.gpu,
                                    vocab_size=len(dl.wv.index2word),
                                    v_vec=dl.wv.vectors,
                                    emb_requires_grad=args.emb_requires_grad,
                                    args=args)

    pprint(args.__dict__)
    val_results = max_f1_epochs_of_vals(args.load_dir)
    results = {}
    logs = {}
    domain = 'All'
    epoch = val_results[domain]['epoch']
    load_model(epoch, bilstm, args.load_dir, args.gpu)
    _results, _ = run(tests, bilstm, args)
    results[domain] = _results[domain]
    results[domain]['epoch'] = epoch
    for domain in args.media:
        epoch = val_results[domain]['epoch']
        load_model(epoch, bilstm, args.load_dir, args.gpu)
        _results, _logs = run(tests, bilstm, args)
        results[domain] = _results[domain]
        results[domain]['epoch'] = epoch
        logs[domain] = _logs[domain]
    dump_dict(results, args.load_dir, 'test_logs')
    dump_predict_logs(logs, args.load_dir)
Example #11
def tuning(trains, vals, wv, args, trial):
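    # objective for hyperparameter search: the trial object (Optuna-style suggest API) samples each setting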
    # num of lstm layer
    n_layers = trial.suggest_int('n_layers', 1, 3)
    # dropout_rate
    dropout_ratio = trial.suggest_categorical('dropout_rate',
                                              [0, 0.1, 0.2, 0.3])

    bilstm = initialize_model(args.gpu,
                              vocab_size=len(wv.index2word),
                              v_vec=wv.vectors,
                              dropout_ratio=dropout_ratio,
                              n_layers=n_layers,
                              model='Base')

    lr = trial.suggest_categorical('learning_rate', [1e-5, 1e-4, 1e-3, 1e-2])
    batch_size = trial.suggest_categorical('batch_size', [16, 32, 64])
    F1 = train(trains, vals, bilstm, args, lr=lr, batch_size=batch_size)
    return F1
Example #12
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    load_config(args)

    dl = DatasetLoading(args.emb_type, args.emb_path, media=args.media)
    if args.dataset_type == 'intra':
        dl.making_intra_df()
    elif args.dataset_type == 'inter':
        dl.making_inter_df()
    else:
        raise ValueError()

    trains, vals, _ = dl.split(args.dataset_type)
    bilstm = train.initialize_model(args.gpu,
                                    vocab_size=len(dl.wv.index2word),
                                    v_vec=dl.wv.vectors,
                                    emb_requires_grad=args.emb_requires_grad,
                                    args=args)

    pprint(args.__dict__)

    run(trains, vals, bilstm, args)
Example #13
        path=config["load_checkpoint_path"],
        train_on_gpu=train_on_gpu,
        multi_gpu=multi_gpu)

    ## Reset scheduler
    if model is not None and config["scheduler_step"] and config[
            "gamma"] and not config["load_schedule"]:
        model.scheduler = lr_scheduler.StepLR(
            optimizer,
            step_size=config["scheduler_step"],
            gamma=config["gamma"])

# If model was not loaded successfully, create new one
if model is None:
    model = train.initialize_model(config["model"],
                                   n_classes=classes,
                                   train_on_gpu=train_on_gpu,
                                   multi_gpu=multi_gpu)
    model.class_to_idx = transformed_dataset.class_to_idx
    model.idx_to_class = transformed_dataset.idx_to_class
    optimizer = optim.Adam(model.parameters())
    model.scheduler = None
    # Decay LR by a factor of 0.7 every 5 epochs
    if config["scheduler_step"] and config["gamma"]:
        model.scheduler = lr_scheduler.StepLR(
            optimizer,
            step_size=config["scheduler_step"],
            gamma=config["gamma"])

## Train loop
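# pull a single batch from the train loader (likely a warm-up / sanity check before the actual loop)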
trainiter = iter(dataloaders['train'])
features, labels = next(trainiter)
Example #14
def main(_):
    '''Check Config.py to set the paths of the models to be ensembled.'''

    data_dir = cfg.DATA_DIR
    set_names = cfg.set_names
    suffixes = cfg.suffixes
    dataset = mask_dataset(data_dir, set_names, suffixes)
    raw_answers = read_raw_answers(data_dir)

    vocab_path = pjoin(data_dir, cfg.vocab_file)
    vocab, rev_vocab = initialize_vocab(vocab_path)

    if not os.path.exists(cfg.log_dir):
        os.makedirs(cfg.log_dir)
    if not os.path.exists(cfg.cache_dir):
        os.makedirs(cfg.cache_dir)
    if not os.path.exists(cfg.fig_dir):
        os.makedirs(cfg.fig_dir)

    c_time = time.strftime('%Y%m%d_%H%M', time.localtime())
    file_handler = logging.FileHandler(pjoin(cfg.log_dir, 'ensemble_log' + c_time + '.txt'))
    logging.getLogger().addHandler(file_handler)

    model_pathes = cfg.model_pathes
    num_m = len(model_pathes)
    train_s = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    train_e = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    val_s = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    val_e = np.zeros((cfg.num_eval, num_m), dtype=np.int32)

    # gpu setting
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    for i in xrange(num_m):
        tf.reset_default_graph()
        with tf.Session(config=config) as sess:
            encoder = Encoder(size=2 * cfg.lstm_num_hidden)
            decoder = Decoder(output_size=2 * cfg.lstm_num_hidden)
            qa = QASystem(encoder, decoder)
            init = tf.global_variables_initializer()
            sess.run(init)
            load_train_dir = get_normalized_train_dir(model_pathes[i])
            initialize_model(sess, qa, load_train_dir)

            ts, te, vs, ve = qa.evaluate_answer(sess, dataset, raw_answers, rev_vocab,
                                                log=True,
                                                ensemble=True,
                                                training=True,
                                                sample=cfg.num_eval)
            train_s[:, i] = ts
            train_e[:, i] = te
            val_s[:, i] = vs
            val_e[:, i] = ve

            if i == num_m - 1:
                # np.save('cache/ensemble.npy', [train_s, train_e, val_s, val_e])
                train_s = bin_count(train_s)
                train_e = bin_count(train_e)
                val_s = bin_count(val_s)
                val_e = bin_count(val_e)
                qa.evaluate_answer(sess, dataset, raw_answers, rev_vocab,
                                   log=True,
                                   training=True,
                                   sendin=(train_s, train_e, val_s, val_e),
                                   sample=cfg.num_eval
                                   )
Example #15
#%%
import pyro
from train import *
import train
import FINNPlot
def plot_itemidx_array(arr,nrow=None):
    if nrow is None:
        nrow = arr.size()[1]
    finnkoder = [ind2val['itemId'][r.item()] for r in arr.flatten()]
    return FINNPlot.add_image_line(finnkoder, nrow=nrow)

param, ind2val, itemattr, dataloaders, sim = train.load_data()
model, guide = train.initialize_model(param,ind2val, itemattr, dataloaders)
param, ind2val, trainer = train.train(param, ind2val, itemattr, dataloaders, sim, model, guide, run_training_loop=False)

#%%
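# load previously trained parameters from a saved Pyro param-store checkpoint into the freshly cleared store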
pyro.clear_param_store()
m = "Jon-Arya-gru-hier-clip1000:lik=0.026:gui=7.18366887024712:lea=0.000434:pri=7.14:pri=0.01:pri=0.01:na" # -20M-steps
pyro.get_param_store().load(f"checkpoints/{m}.pyro", map_location=param['device'])
guide.initialize_parameters()
#%%
dl = iter(dataloaders['train'])
dummybatch = next(dl)
dummybatch['phase_mask'] = (dummybatch['mask_type']==1).float()
dummybatch = {key: val.long().to(param.get("device")) for key, val in dummybatch.items()}

#%% Distribution of click probabilities over slate at a given time t for one user
#h0batch_fixed = par['h0-batch']
for i in range(10):
    par=guide(dummybatch)
    #par['h0-batch'] = h0batch_fixed
Example #16
def main(_):
    '''Check Config.py to set the paths of the models to be ensembled.'''

    data_dir = cfg.DATA_DIR
    set_names = cfg.set_names
    suffixes = cfg.suffixes
    dataset = mask_dataset(data_dir, set_names, suffixes)
    raw_answers = read_raw_answers(data_dir)

    vocab_path = pjoin(data_dir, cfg.vocab_file)
    vocab, rev_vocab = initialize_vocab(vocab_path)

    if not os.path.exists(cfg.log_dir):
        os.makedirs(cfg.log_dir)
    if not os.path.exists(cfg.cache_dir):
        os.makedirs(cfg.cache_dir)
    if not os.path.exists(cfg.fig_dir):
        os.makedirs(cfg.fig_dir)

    c_time = time.strftime('%Y%m%d_%H%M', time.localtime())
    file_handler = logging.FileHandler(
        pjoin(cfg.log_dir, 'ensemble_log' + c_time + '.txt'))
    logging.getLogger().addHandler(file_handler)

    model_pathes = cfg.model_pathes
    num_m = len(model_pathes)
    train_s = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    train_e = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    val_s = np.zeros((cfg.num_eval, num_m), dtype=np.int32)
    val_e = np.zeros((cfg.num_eval, num_m), dtype=np.int32)

    # gpu setting
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    for i in xrange(num_m):
        tf.reset_default_graph()
        with tf.Session(config=config) as sess:
            encoder = Encoder(size=2 * cfg.lstm_num_hidden)
            decoder = Decoder(output_size=2 * cfg.lstm_num_hidden)
            qa = QASystem(encoder, decoder)
            init = tf.global_variables_initializer()
            sess.run(init)
            load_train_dir = get_normalized_train_dir(model_pathes[i])
            initialize_model(sess, qa, load_train_dir)

            ts, te, vs, ve = qa.evaluate_answer(sess,
                                                dataset,
                                                raw_answers,
                                                rev_vocab,
                                                log=True,
                                                ensemble=True,
                                                training=True,
                                                sample=cfg.num_eval)
            train_s[:, i] = ts
            train_e[:, i] = te
            val_s[:, i] = vs
            val_e[:, i] = ve

            if i == num_m - 1:
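                # last model: aggregate all models' start/end predictions (bin_count presumably keeps the most frequent index) and evaluate the ensemble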
                # np.save('cache/ensemble.npy', [train_s, train_e, val_s, val_e])
                train_s = bin_count(train_s)
                train_e = bin_count(train_e)
                val_s = bin_count(val_s)
                val_e = bin_count(val_e)
                qa.evaluate_answer(sess,
                                   dataset,
                                   raw_answers,
                                   rev_vocab,
                                   log=True,
                                   training=True,
                                   sendin=(train_s, train_e, val_s, val_e),
                                   sample=cfg.num_eval)
Example #17
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    load_config(args, args.load_FAdir)
    load_config(args, args.load_CPSdir)

    emb_type = 'Word2VecWiki'

    dl = DatasetLoading(emb_type,
                        args.emb_path,
                        exo1_word='僕',
                        exo2_word='おまえ',
                        exoX_word='これ')
    dl.making_intra_df()

    trains_dict, _, tests_dict = dl.split_each_domain('intra')

    statistics_of_each_case_type = train.init_statistics_of_each_case_type(
        trains_dict, args.case, args.media)

    bilstm_FT = train.initialize_model(args.gpu,
                                       vocab_size=len(dl.wv.index2word),
                                       v_vec=dl.wv.vectors,
                                       dropout_ratio=0.2,
                                       n_layers=3,
                                       model='Base',
                                       statistics_of_each_case_type=None)
    bilstm_FA = train.initialize_model(args.gpu,
                                       vocab_size=len(dl.wv.index2word),
                                       v_vec=dl.wv.vectors,
                                       dropout_ratio=0.2,
                                       n_layers=3,
                                       model='FA',
                                       statistics_of_each_case_type=None)
    bilstm_CPS = train.initialize_model(
        args.gpu,
        vocab_size=len(dl.wv.index2word),
        v_vec=dl.wv.vectors,
        dropout_ratio=0.2,
        n_layers=3,
        model='CPS',
        statistics_of_each_case_type=statistics_of_each_case_type)

    results = {}
    logs = {}
    # domain = 'All'

    pprint(args.__dict__)
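    # for each domain, load the best-validation-F1 epochs of the FT, FA and CPS models and evaluate their combination on the test set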
    for domain in tests_dict.keys():
        load_config(args, args.load_FTdir + f'/{domain}/{args.case}')
        val_results_FT = max_f1_epochs_of_vals(args.load_FTdir +
                                               f'/{domain}/{args.case}')
        epoch_FT = val_results_FT[domain]['epoch']
        val_results_FA = max_f1_epochs_of_vals(args.load_FAdir)
        epoch_FA = val_results_FA[domain]['epoch']
        val_results_CPS = max_f1_epochs_of_vals(args.load_CPSdir)
        epoch_CPS = val_results_CPS[domain]['epoch']

        load_model(epoch_FT, bilstm_FT,
                   args.load_FTdir + f'/{domain}/{args.case}', args.gpu)
        load_model(epoch_FA, bilstm_FA, args.load_FAdir, args.gpu)
        load_model(epoch_CPS, bilstm_CPS, args.load_CPSdir, args.gpu)

        _results, _logs = run(tests_dict, bilstm_FT, bilstm_FA, bilstm_CPS, 1,
                              args)
        results[domain] = _results[domain]
        results[domain]['epoch_FT'] = epoch_FT
        results[domain]['epoch_FA'] = epoch_FA
        results[domain]['epoch_CPS'] = epoch_CPS
        logs[domain] = _logs[domain]
    dump_dict(results, args.dump_dir + f'/{args.case}', 'test_logs')
    dump_predict_logs(logs, args.dump_dir + f'/{args.case}')