def _build(self):
    g = tf.Graph()
    with g.as_default():
        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        sess = tf.Session()
        language_model.set_session(sess)
        language_model.setup_model()
    self.language_model = language_model
def construct_language_model_from_tabnak_collection(dictionary):
    normalizer = SCNormalizer()
    language_model = LanguageModel(dictionary)
    i = 0
    with open("data/tabnakNewsCollection.json", 'r') as file:
        for line in file:
            try:
                data = json.loads(line)
                content = data['title'] + " " + data['content']
                normalized_content = normalizer.normalize(content)
                word_tokenized = word_tokenize(normalized_content)
                for word in word_tokenized:
                    word = word.replace("_", PersianTools().HalfSpace)
                    language_model.add(word)
                i += 1
                if i % 1000 == 0:
                    print(i)
            except (json.JSONDecodeError, KeyError):
                print("error occurred reading json file")
    language_model.export_to_file()
    return language_model
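# Hypothetical usage sketch (not part of the original snippet): build the
# Tabnak model from a fetched dictionary, mirroring the SCDictionary setup in
# the spell-checker menu example further down this page.
from models.dictionary import SCDictionary

tabnak_dictionary = SCDictionary()
tabnak_dictionary.fetch_from_file()
tabnak_language_model = construct_language_model_from_tabnak_collection(tabnak_dictionary)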
Example #3

new = True
another = True

print('\nTop Comment Generator')
print('@ellismiranda, 2018')

while another:
    if new:
        subreddit = input(
            '\nWhat subreddit are we getting the top post from? r/')
        new = False
        top_post = get_hot_posts(subreddit)[0]
        post = generate_post_reference(top_post)
        tops = get_post_top_comments(post)
        model = LanguageModel()
        model.train(tops)
    example_sentence = model.generate_sentence()
    print('\nPost this and enjoy the free karma!:',
          format_comment(example_sentence))
    another = input(
        'Generate another top comment for the top post of r/{}? [y/n] '.format(
            subreddit)) == 'y'
    if not another:
        new = input(
            'Generate top comments for another subreddit? [y/n] ') == 'y'
        another = new
def ivqa_decoding_beam_search(checkpoint_path=None):
    model_config = ModelConfig()
    method = FLAGS.method
    res_file = 'result/bs_cand_for_vis.json'
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval',
                                    top_ans_file='../VQA-tensorflow/data/vqa_trainval_top2000_answers.txt')

    # get data reader
    subset = 'kpval'
    reader = create_fn(batch_size=1, subset=subset,
                       version=FLAGS.test_version)

    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(5000)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
    # vqa_model = N2MNWrapper()
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    quest_ids_to_vis = {5682052: 'bread',
                        965492: 'plane',
                        681282: 'station'}

    print('Running beam search inference...')
    results = []
    batch_vqa_scores = []

    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        quest_id_key = int(quest_ids)

        if quest_id_key not in quest_ids_to_vis:
            continue
        # pdb.set_trace()

        im, gt_q, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        if top_ans == 2000:  # answer falls outside the top-2000 answer set; skip
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len], sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model([ivqa_pathes],
                                                                pad_token=model.pad_token - 1,
                                                                max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.1).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]
        print('keep: %d/%d' % (num_keep, len(ivqa_pathes)))

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        def token_arr_to_list(arr):
            return arr.flatten().tolist()

        for _pid, idx in enumerate(valid_inds):
            path = ivqa_pathes[idx]
            # sc = vqa_scores[idx]
            sentence = to_sentence.index_to_question(path)
            aug_quest_id = question_id * 1000 + _pid
            res_i = {'image_id': int(image_id),
                     'aug_id': aug_quest_id,
                     'question_id': question_id,
                     'target': sentence,
                     'top_ans_id': int(top_ans),
                     'question': to_sentence.index_to_question(token_arr_to_list(gt_q)),
                     'answer': to_sentence.index_to_answer(token_arr_to_list(ans_tokens))}
            results.append(res_i)

    save_json(res_file, results)
    return None
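# `process_one` is imported from elsewhere in the original repository and is
# not shown on this page. A minimal stand-in consistent with the
# "# find unique" call sites above, offered purely as an assumption about its
# contract (deduplicate sampled paths, keeping the best score per path):
import numpy as np

def process_one_sketch(scores, pathes):
    best = {}
    for sc, path in zip(scores, pathes):
        key = tuple(int(t) for t in np.asarray(path).ravel())
        if key not in best or sc > best[key]:
            best[key] = sc
    unique_pathes = [list(k) for k in best]
    unique_scores = np.array(list(best.values()), dtype=np.float32)
    return unique_scores, unique_pathes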
Example #5
def ivqa_decoding_beam_search(checkpoint_path=None):
    model_config = ModelConfig()
    method = FLAGS.method
    res_file = 'result/bs_gen_%s.json' % method
    score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'kptrain'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(5)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

    num_batches = reader.num_batches

    print('Running beam search inference...')

    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    neg_pathes = []
    need_stop = False
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len],
                                                sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model(
            [ivqa_pathes], pad_token=model.pad_token - 1, max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        legality_scores[match_gt] = 1.0

        neg_inds = np.where(legality_scores < 0.2)[0]
        for idx in neg_inds:
            ser_neg = serialize_path(ivqa_pathes[idx][1:])
            neg_pathes.append(ser_neg)
            if len(neg_pathes) > 100000:
                need_stop = True
                break
            # if len(neg_pathes) > 1000:
            #     need_stop = True
            #     break
            # print('Neg size: %d' % len(neg_pathes))
        if need_stop:
            break
    sv_file = 'data/lm_init_neg_pathes.json'
    save_json(sv_file, neg_pathes)
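# `serialize_path` and `wrap_samples_for_language_model` are likewise imported
# from the original code base. Plausible minimal stand-ins, stated only as
# assumptions about their contracts:
import numpy as np

def serialize_path_sketch(path):
    # Assumption: flatten a token-id sequence into a hashable string key.
    return ' '.join(str(int(t)) for t in path)

def wrap_samples_for_language_model_sketch(sample_lists, pad_token, max_length=20):
    # Assumption: pack variable-length paths into a fixed (N, max_length)
    # id matrix padded with `pad_token`, plus a vector of true lengths.
    samples = [p for lst in sample_lists for p in lst]
    ids = np.full((len(samples), max_length), pad_token, dtype=np.int32)
    lengths = np.zeros(len(samples), dtype=np.int32)
    for i, p in enumerate(samples):
        p = list(p)[:max_length]
        ids[i, :len(p)] = p
        lengths[i] = len(p)
    return ids, lengths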
Example #6
sp_model = model(**sp_params)
qg_params = json.load(
    open(os.path.join(params['read_qg_model_path'], 'params.json'), 'r'))
qg_model = model(**qg_params)
if not opt.testing:
    sp_model.load_model(os.path.join(params['read_sp_model_path'],
                                     'model.pkl'))
    logger.info("Load Semantic Parsing model from path %s" %
                (params['read_sp_model_path']))
    qg_model.load_model(os.path.join(params['read_qg_model_path'],
                                     'model.pkl'))
    logger.info("Load Question Generation model from path %s" %
                (params['read_qg_model_path']))
    qlm_params = json.load(
        open(os.path.join(params['read_qlm_path'], 'params.json'), 'r'))
    qlm_model = LanguageModel(**qlm_params)
    qlm_model.load_model(os.path.join(params['read_qlm_path'], 'model.pkl'))
    logger.info("Load Question Language Model from path %s" %
                (params['read_qlm_path']))
    lflm_params = json.load(
        open(os.path.join(params['read_lflm_path'], 'params.json'), 'r'))
    lflm_model = LanguageModel(**lflm_params)
    lflm_model.load_model(os.path.join(params['read_lflm_path'], 'model.pkl'))
    logger.info("Load Logical Form Language Model from path %s" %
                (params['read_lflm_path']))
    reward_model = RewardModel(opt.dataset,
                               qlm_model,
                               lflm_model,
                               lm_vocab,
                               sp_device=sp_device,
                               qg_device=qg_device)
from data_loaders.language_model_loader import LanguageModelLoader
from models.language_model import LanguageModel
from helpers import constants

base_path = 'datasets/newsqa'
language_model_loader = LanguageModelLoader(
    base_path, tokenizer_type=constants.TOKENIZER_NLTK)
language_model_loader.reset_indices()
batch = language_model_loader.get_batch(dataset_type=constants.DATASET_TRAIN,
                                        batch_size=10)

config = {}
config['vocab_size'] = language_model_loader.get_vocab().size()
config['hidden_size'] = 100
config['embedding_size'] = 300
config['num_layers'] = 1
config['dropout'] = 0.0
config['batch_first'] = False
config['batch_size'] = 24
config['learning_rate'] = 1e-3
config['log_path'] = 'logs.txt'
config['save_directory'] = 'logs/squad_saved_data'
config['use_pretrained_embeddings'] = True
config['pretrained_embeddings_path'] = 'datasets/squad/word_embeddings.npy'
config['finetune_embeddings'] = False
config['load_model'] = True
config['load_path'] = 'logs/squad_saved_data/model_7_old.pyt7'

language_model = LanguageModel(config)
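# The snippet ends at construction. The SQuAD training example later on this
# page suggests the load_model/load_path settings are honored externally;
# roughly (an assumption, reusing the helper shown in that later snippet):
from helpers import torch_utils

if config['load_model']:
    language_model = torch_utils.load_model(config['load_path'])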
from models.language_model import LanguageModel
import torch
from torch import nn
from torch import optim
from torch.autograd import variable
from helpers import torch_utils

config = {}
config['vocab_size'] = 25 
config['hidden_size'] = 50
config['embedding_size'] = 10 
config['num_layers'] = 1
config['dropout'] = 0.0
config['batch_first'] = False

language_model = LanguageModel(config)
language_model.cuda()
# contexts: context_length x batch_size
# inputs: input_length x batch_size
# desired_inputs: input_length x batch_size

input_token = variable.Variable(torch.LongTensor([[1]]))
context_tokens = variable.Variable(torch.LongTensor([[2], [3], [4], [5], [6], [7], [8]]))
language_model.predict(input_token, context_tokens, torch.LongTensor([[1]]))
Example #9
    if not os.path.exists(vocabs_path):
        os.mkdir(vocabs_path)

    chars_vocab_path = os.path.join(vocabs_path, "char_vocabulary.pkl")
    authors_vocab_path = os.path.join(vocabs_path, "authors_vocabulary.pkl")
    families_vocab_path = os.path.join(vocabs_path, "families_vocabulary.pkl")

    home_path = os.getcwd()
    base_dir = os.path.join(home_path, "savings")  # experiments directory
    exp_dir = "vulgaris_analyse_language_varieties"
    exp_path = os.path.join(base_dir, exp_dir)
    if not os.path.exists(exp_path):
        os.mkdir(exp_path)  # this experiment directory

    if not os.path.exists(chars_vocab_path):
        config, _, _ = LanguageModel.get_params(FLAGS)
        dataset = CharLMDataset(config)
        dataset.build(filename, split_size=2)  # to create vocabs

        chars_vocab = dataset.vocabulary
        authors_vocab = dataset.authors_vocabulary
        families_vocab = dataset.families_vocabulary

        save_data(chars_vocab, chars_vocab_path)
        save_data(authors_vocab, authors_vocab_path)
        save_data(families_vocab, families_vocab_path)

    else:
        chars_vocab = load_data(chars_vocab_path)
        authors_vocab = load_data(authors_vocab_path)
        families_vocab = load_data(families_vocab_path)
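# `save_data` / `load_data` are helpers from the original project. Given the
# .pkl paths above, a minimal pickle-based sketch (an assumption) would be:
import pickle

def save_data_sketch(obj, path):
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def load_data_sketch(path):
    with open(path, 'rb') as f:
        return pickle.load(f)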
Example #10
def train():
    model_config = ModelConfig()
    training_config = TrainConfig()

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader('VAQ-EpochAtt', phase='train')

    env = MixReward(attention_vqa=True)
    env.set_cider_state(use_cider=True)
    env.diversity_reward.mode = 'winner_take_all'
    env.set_language_thresh(1.0 / 3.0)
    # env.set_replay_buffer(insert_thresh=0.1,
    #                       sv_dir='vqa_replay_buffer/low_att')  # if 0.5, already fooled others

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)
    ckpt_suffix = train_dir.split('/')[-1]

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.set_init_ckpt(
            'model/v1_var_ivqa_restvalr2_VAQ-Var/model.ckpt-374000')
        model.build()

        # Set up the learning rate.
        learning_rate = tf.constant(training_config.initial_learning_rate *
                                    0.1)

        def _learning_rate_decay_fn(learn_rate, global_step):
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Setup summaries
        summary_op = tf.summary.merge_all()

        # Setup language model
        lm = LanguageModel()
        lm.build()
        lm.set_cache_dir(ckpt_suffix)
        env.set_language_model(lm)

    # create reader
    reader = reader_fn(
        batch_size=16,
        subset='kprestval',  # 'kptrain'
        version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        model=model,
                        summary_op=summary_op,
                        env=env)
def ivqa_decoding_beam_search(ckpt_dir, method):
    model_config = ModelConfig()
    inf_type = 'beam'
    assert inf_type in ['beam', 'rand']
    # method = FLAGS.method
    if inf_type == 'rand':
        res_file = 'result/bs_RL2_cands_LM_%s.json' % method
    else:
        res_file = 'result/bs_RL2_cands_LM_%s_BEAM.json' % method
    if os.path.exists(res_file):
        print('File %s already exist, skipped' % res_file)
        return
    # score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'bs_test'
    reader = create_fn(batch_size=1, subset=subset,
                       version=FLAGS.test_version)

    exemplar = ExemplarLanguageModel()

    # if checkpoint_path is None:
    #     if FLAGS.checkpoint_dir:
    #         ckpt_dir = FLAGS.checkpoint_dir
    #     else:
    #         ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
    # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        if inf_type == 'rand':
            model = model_fn(model_config, 'sampling')
            model.set_num_sampling_points(1000)
        else:
            model = model_fn(model_config, 'sampling_beam')
            model.set_num_sampling_points(1000)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
    # vqa_model = N2MNWrapper()
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = {}
    # batch_vqa_scores = []

    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):
        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len], sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model([ivqa_pathes],
                                                                pad_token=model.pad_token - 1,
                                                                max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.3).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        # for idx in valid_inds:
        #     path = ivqa_pathes[idx]
        #     sc = legality_scores[idx]
        #     sentence = to_sentence.index_to_question(path)
        #     # questions.append(sentence)
        #     print('%s (%0.3f)' % (sentence, sc))

        # apply VQA model
        sampled = [ivqa_pathes[_idx] for _idx in valid_inds]
        legality_scores = legality_scores[valid_inds]

        result_key = int(question_id)
        tmp = []
        for idx, path in enumerate(sampled):
            # path = sampled[idx]
            sc = legality_scores[idx]
            sentence = to_sentence.index_to_question(path)
            # aug_quest_id = question_id * 1000 + _pid
            res_i = {'image_id': int(image_id),
                     'aug_id': idx,
                     'question_id': question_id,
                     'question': sentence,
                     'score': float(sc)}
            tmp.append(res_i)
        print('Number of unique questions: %d' % len(tmp))
        results[result_key] = tmp

    save_json(res_file, results)
Example #12
    batches = get_seq_batches(data_split, params.batch_size, params.primal)
    criterion = nn.NLLLoss()

    eval_loss = eval_samples(model, batches, criterion, params)
    print('Evaluation loss: {:.3f} | ppl: {:.3f}'.format(
        eval_loss, math.exp(eval_loss)))


if __name__ == '__main__':
    params = get_params()

    data = Corpus(params)
    model = LanguageModel(
        len(data.utter_vocab) if params.primal else len(data.query_vocab) +
        len(data.schema_vocab),
        params.emb_dim,
        params.hidden_dim,
        params.num_layers,
        dropout=params.dropout,
        tie_weights=params.tie_weights).to(device)

    print('=====================Model Parameters=====================')
    for name, param in model.named_parameters():
        print(name, param.requires_grad, param.is_cuda, param.size())
        # assert param.is_cuda

    sys.stdout.flush()

    checkpoint = os.path.join(params.save_dir,
                              '{}.pt'.format(params.task_name))
    if os.path.exists(checkpoint):
        model.load(checkpoint)
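# `eval_samples` is not shown on this page. A minimal sketch matching its call
# site (an NLLLoss criterion, returning an average loss); the batch layout and
# model output format are assumptions:
import torch

def eval_samples_sketch(model, batches, criterion, params):
    model.eval()
    total_loss, n = 0.0, 0
    with torch.no_grad():
        for inputs, targets in batches:      # assumed (input, target) pairs
            log_probs = model(inputs)        # assumed: model emits log-probs
            total_loss += criterion(log_probs.view(-1, log_probs.size(-1)),
                                    targets.view(-1)).item()
            n += 1
    return total_loss / max(n, 1)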
Example #13
from logic.language_model_construction import construct_language_model_from_tabnak_collection
from models.dictionary import SCDictionary
from models.language_model import LanguageModel
from testing.test import test

guide_string = "\n\n*********************** welcome to my spell checker project *****************************\n\n" \
               "Please enter on of choices:\n\n" \
               "1- Construct dictionary\n" \
               "2- Construct language model\n" \
               "3- Correcting a word\n"\
               "4- Testing system\n"
number_of_choice = 4

dictionary = SCDictionary()
dictionary.fetch_from_file()
language_model = LanguageModel(dictionary)
language_model.fetch_from_file()

normalizer = Normalizer()
#clean_data()


while True:
    inp = input(guide_string)
    try:
        choice = int(inp)
        if choice < 1 or choice > number_of_choice:
            raise ValueError
    except ValueError:
        print("Your choice was not valid")
        continue
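    # The original snippet is cut off after input validation. A hedged sketch
    # of the dispatch that would follow, using the function imported above
    # (the exact calls in the original may differ):
    if choice == 2:
        language_model = construct_language_model_from_tabnak_collection(dictionary)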
Example #14
def ivqa_decoding_beam_search(checkpoint_path=None):
    model_config = ModelConfig()
    method = FLAGS.method
    res_file = 'result/bs_gen_%s.json' % method
    score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'kptest'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(1000)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
        vqa_model = VQAWrapper(g, sess)
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    batch_vqa_scores = []

    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len],
                                                sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model(
            [ivqa_pathes], pad_token=model.pad_token - 1, max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.1).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        # for idx in valid_inds:
        #     path = ivqa_pathes[idx]
        #     sc = legality_scores[idx]
        #     sentence = to_sentence.index_to_question(path)
        #     # questions.append(sentence)
        #     print('%s (%0.3f)' % (sentence, sc))

        # apply VQA model
        sampled = [ivqa_pathes[_idx] for _idx in valid_inds]
        # vqa_scores = vqa_model.get_scores(sampled, image_id, top_ans)
        vqa_scores, is_valid = vqa_model.get_scores(sampled, im, top_ans)
        # conf_inds = (-vqa_scores).argsort()[:20]
        conf_inds = np.where(is_valid)[0]
        # pdb.set_trace()
        # conf_inds = (-vqa_scores).argsort()[:40]

        t4 = time()
        print('Time for VQA verification: %0.2fs' % (t4 - t3))

        this_mean_vqa_score = vqa_scores[conf_inds].mean()
        print('sampled: %d, unique: %d, legal: %d, gt: %d, mean score %0.2f' %
              (pathes.shape[0], len(ivqa_pathes), num_keep, match_gt.sum(),
               this_mean_vqa_score))
        batch_vqa_scores.append(this_mean_vqa_score)

        for _pid, idx in enumerate(conf_inds):
            path = sampled[idx]
            sc = vqa_scores[idx]
            sentence = to_sentence.index_to_question(path)
            aug_quest_id = question_id * 1000 + _pid
            res_i = {
                'image_id': int(image_id),
                'question_id': aug_quest_id,
                'question': sentence,
                'score': float(sc)
            }
            results.append(res_i)

    save_json(res_file, results)
    batch_vqa_scores = np.array(batch_vqa_scores, dtype=np.float32)
    mean_vqa_score = batch_vqa_scores.mean()
    from scipy.io import savemat
    savemat(score_file, {
        'scores': batch_vqa_scores,
        'mean_score': mean_vqa_score
    })
    print('BS mean VQA score: %0.3f' % mean_vqa_score)
    return res_file, mean_vqa_score
config['batch_size'] = 24
config['learning_rate'] = 1e-3
config['beam_size'] = 5
config['log_path'] = 'logs.txt'
config['save_directory'] = 'logs/squad_saved_data'
config['use_pretrained_embeddings'] = True
config['pretrained_embeddings_path'] = 'datasets/squad/word_embeddings.npy'
config['finetune_embeddings'] = False
config['load_model'] = False
config['gpu_mode'] = True
config['load_path'] = 'logs/squad_saved_data/model_6.pyt7'  # CHANGE THIS TO WHATEVER PATH YOU WANT

io_utils.check_dir('logs/squad_saved_data')

language_model = LanguageModel(config)
if config['load_model']:
    language_model = torch_utils.load_model(config['load_path'])

language_model.cuda()
language_wrapper = LanguageWrapper(language_model,
                                   language_model_loader.get_vocab())
language_trainer = LanguageTrainer(config, language_wrapper,
                                   language_model_loader)

for i in range(0, 15):
    loss, accuracy, predictions = language_trainer.train(epoch_num=i)

    if i % 3 == 2:
        predictions = language_trainer.predict(
            dataset_type=constants.DATASET_TEST,
Example #16
def train():
    model_config = ModelConfig()
    training_config = TrainConfig()

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader('VAQ-Var', phase='train')

    env = MixReward()
    env.diversity_reward.mode = 'winner_take_all'
    env.set_cider_state(False)
    env.set_language_thresh(0.2)

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.build()

        # Set up the learning rate.
        learning_rate = tf.constant(training_config.initial_learning_rate *
                                    0.1)

        def _learning_rate_decay_fn(learn_rate, global_step):
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Setup summaries
        summary_op = tf.summary.merge_all()

        # Setup language model
        lm = LanguageModel()
        lm.build()
        env.set_language_model(lm)

    # create reader
    reader = reader_fn(
        batch_size=16,
        subset='kprestval',  # 'kptrain'
        version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        model=model,
                        summary_op=summary_op,
                        env=env)
Example #17
import torch
from torch import nn
from torch import optim
from torch.autograd import variable
from helpers import torch_utils
from models.language_model import LanguageModel

config = {}
config['vocab_size'] = 110000
config['hidden_size'] = 150
config['embedding_size'] = 100
config['num_layers'] = 1
config['dropout'] = 0.0
config['batch_first'] = False
config['use_pretrained_embeddings'] = False
config['finetune_embeddings'] = True

language_model = LanguageModel(config).cuda()

# contexts: context_length x batch_size
# inputs: input_length x batch_size
# desired_inputs: input_length x batch_size

optimizer = optim.Adam(language_model.parameters(), lr=3e-2)
criterion = nn.NLLLoss()

for i in range(0, 1000):
    optimizer.zero_grad()
    inputs = variable.Variable(torch.LongTensor([[1, 2, 3, 4, 5, 6, 7]] *
                                                100)).cuda()
    contexts = variable.Variable(
        torch.LongTensor([[4, 5, 6, 7, 8, 9, 10], [4, 5, 6, 7, 8, 9, 10],
                          [4, 5, 6, 7, 8, 9, 10], [4, 5, 6, 7, 8, 9,
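#                           ... (the page is truncated mid-tensor here)
# A hedged guess at the rest of the training step, based only on the
# optimizer/criterion set up above; the forward signature follows the
# predict(input, context, ...) example earlier and is an assumption, so the
# completion is left commented out rather than presented as the original code:
#     output = language_model(inputs, contexts)
#     loss = criterion(output.view(-1, config['vocab_size']), inputs.view(-1))
#     loss.backward()
#     optimizer.step()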