def test():
    """Sanity-check `_build_vqa_inputs` on one mini-batch.

    Pulls a batch from the 'VAQ-Var' test reader, re-processes the raw
    question tokens, then feeds them through the placeholder pipeline and
    prints the arrays before and after.

    NOTE(review): another `test` is defined later in this file and
    shadows this one at import time.
    """
    def show(name, arr):
        # Dump an array with a short label.
        print('%s:' % name)
        print(arr)

    reader = create_reader('VAQ-Var', 'test')(batch_size=4)
    reader.start()
    batch = reader.pop_batch()

    # Slot 0 (image) is unused by the processing step.
    processed = process_input_data([None, batch[1], batch[2]], 15954)
    d_quest = processed[1]
    d_quest_len = processed[2]

    quest = tf.placeholder(tf.int32, [None, None])
    quest_len = tf.placeholder(tf.int32, None)
    vqa_quest_in, vqa_quest_len = _build_vqa_inputs(quest, quest_len)

    sess = tf.Session()
    show('q', d_quest)
    show('q_len', d_quest_len)

    feed = {quest: d_quest, quest_len: d_quest_len}
    vqa_q, vqa_q_len = sess.run([vqa_quest_in, vqa_quest_len],
                                feed_dict=feed)
    show('vqa_q', vqa_q)
    show('vqa_q_len', vqa_q_len)
    reader.stop()
def test():
    """Eyeball-check the 'VAQ-CA' train reader against the answer codecs.

    Decodes labels and answer sequences from a few batches and prints
    them next to the ground-truth answers for manual comparison.

    NOTE(review): this redefines the earlier `test` in this file; only
    this definition is live after import.
    """
    top_ans_file = '/import/vision-ephemeral/fl302/code/' \
                   'VQA-tensorflow/data/vqa_trainval_top2000_answers.txt'
    # top_ans_file = 'data/vqa_trainval_top2000_answers.txt'
    mc_ctx = MultiChoiceQuestionManger(subset='val', load_ans=True,
                                       top_ans_file=top_ans_file)
    to_sentence = SentenceGenerator(trainset='trainval',
                                    top_ans_file=top_ans_file)
    answer_enc = mc_ctx.encoder
    # quest_ids = mc_ctx._quest_id2image_id.keys()
    # quest_ids = np.array(quest_ids)

    # qids = np.random.choice(quest_ids, size=(5,), replace=False)

    create_fn = create_reader('VAQ-CA', 'train')
    reader = create_fn(batch_size=4, subset='kprestval')
    reader.start()

    for _ in range(20):
        # inputs = reader.get_test_batch()
        inputs = reader.pop_batch()

        # Only the label / answer-sequence / id fields are inspected here.
        _, _, _, _, labels, ans_seq, ans_len, quest_ids, image_ids = inputs

        b_top_ans = answer_enc.get_top_answers(labels)
        for i, (quest_id, i_a) in enumerate(zip(quest_ids, b_top_ans)):
            print('question id: %d' % quest_id)
            gt = mc_ctx.get_gt_answer(quest_id)
            print('GT: %s' % gt)
            print('Top: %s' % i_a)
            print('SG: top: %s' % to_sentence.index_to_top_answer(labels[i]))
            # Trim padding before decoding the free-form answer sequence.
            seq = ans_seq[i][:ans_len[i]].tolist()
            print('SG: seq: %s\n' % to_sentence.index_to_answer(seq))

    reader.stop()
Example #3
0
def main(_):
    """Spot-check the 'VAQ-2Att' training reader.

    Decodes and prints a few (question, answer) pairs for both dataset
    versions, v1 and v2.
    """
    batch_size = 4
    create_fn = create_reader('VAQ-2Att', phase='train')
    to_sentence = SentenceGenerator(trainset='trainval')

    def trim_sequence(seqs, seqs_len, idx):
        # Cut row `idx` down to its true (unpadded) length.
        return seqs[idx][:seqs_len[idx]]

    def test_reader(reader):
        reader.start()
        for _ in range(5):
            batch = reader.pop_batch()
            _, _, capt, capt_len, ans_seq, ans_seq_len = batch
            # Decode sample #1 of the batch for a quick visual check.
            question = to_sentence.index_to_question(
                trim_sequence(capt, capt_len, 1))
            answer = to_sentence.index_to_answer(
                trim_sequence(ans_seq, ans_seq_len, 1))
            print('Q: %s\nA: %s\n' % (question, answer))
        reader.stop()

    print('v1:')
    test_reader(create_fn(batch_size, subset='kptrain', version='v1'))

    print('v2:')
    test_reader(create_fn(batch_size, subset='kptrain', version='v2'))
def ivqa_decoding_beam_search(checkpoint_path=None, subset='kpval'):
    """Generate one question per example via greedy inference and save
    {image_id, question_id, question} records as JSON.

    Args:
        checkpoint_path: checkpoint to restore; if None, the newest one
            in the configured checkpoint directory is used.
        subset: name of the data subset to evaluate on.

    Returns:
        Path of the JSON result file that was written.
    """
    model_config = ModelConfig()
    res_file = 'result/quest_vaq_greedy_%s_%s.json' % (
        FLAGS.model_type.upper(), subset)
    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader(FLAGS.model_type, phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    reader = create_fn(batch_size=100,
                       subset=subset,
                       version=FLAGS.test_version)

    if checkpoint_path is None:
        ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'beam')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)

        scores, pathes = post_process_prediction(scores, pathes)
        question = to_sentence.index_to_question(pathes[0])
        print('%d/%d: %s' % (i, num_batches, question))

        for quest_id, image_id, path in zip(quest_ids, image_ids, pathes):
            sentence = to_sentence.index_to_question(path)
            res_i = {
                'image_id': int(image_id),
                'question_id': int(quest_id),
                'question': sentence
            }
            results.append(res_i)

    save_json(res_file, results)
    return res_file
Example #5
0
def vaq_decoding_greedy(checkpoint_path=None, subset='kpval'):
    """Greedy question decoding over `subset`.

    Restores the model (via a plain Saver rather than a Restorer),
    decodes one question per example, and writes the results to JSON.

    Args:
        checkpoint_path: checkpoint to restore; if None, the newest one
            in the configured checkpoint directory is used.
        subset: name of the data subset to evaluate on.

    Returns:
        Path of the JSON result file that was written.
    """
    model_config = ModelConfig()
    res_file = 'result/quest_vaq_greedy_%s.json' % FLAGS.model_type.upper()

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader(FLAGS.model_type, phase='test')
    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # build data reader
    reader = create_fn(batch_size=32, subset=subset)

    if checkpoint_path is None:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir %
                                             FLAGS.model_type)
        checkpoint_path = ckpt.model_checkpoint_path

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'greedy')
        model.build()
        saver = tf.train.Saver()

        sess = tf.Session()
        tf.logging.info('Restore from model %s' %
                        os.path.basename(checkpoint_path))
        saver.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running greedy inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)

        scores, pathes = post_process_prediction(scores, pathes)
        question = to_sentence.index_to_question(pathes[0])
        print('%d/%d: %s' % (i, num_batches, question))

        for quest_id, image_id, path in zip(quest_ids, image_ids, pathes):
            sentence = to_sentence.index_to_question(path)
            res_i = {
                'image_id': int(image_id),
                'question_id': int(quest_id),
                'question': sentence
            }
            results.append(res_i)

    save_json(res_file, results)
    return res_file
Example #6
0
 def __init__(self, batch_size, subset='kpval', phase='train', version='v2'):
     """Wire up the fusion feature reader plus the matching rerank-score
     context for the requested dataset version."""
     self.phase = phase
     self.version = version
     # v1 uses the 'Fusionv1' reader; any other version the plain 'Fusion'.
     suffix = 'v1' if version == 'v1' else ''
     reader_fn = create_reader('Fusion%s' % suffix, phase=phase)
     self.feat_reader = reader_fn(batch_size, subset, version=version)
     ctx_cls = RerankContext if version == 'v2' else RerankContext_v1
     self.score_reader = ctx_cls(subset=subset)
def sample_cst_questions(checkpoint_path=None, subset='kptrain'):
    """Interactively inspect contrastive question samples.

    For each batch, prints the ground-truth question/answer followed by
    the top-k sampled contrastive questions, then drops into pdb so the
    batch can be examined by hand. Nothing is saved.

    NOTE(review): unlike the sibling decoders, there is no fallback here
    when `checkpoint_path` is None -- restorer.restore() would receive
    None directly; confirm callers always pass a path.
    """
    model_config = ModelConfig()
    model_config.convert = FLAGS.convert
    model_config.loss_type = 'pairwise'
    model_config.top_k = 3
    batch_size = 8
    # Get model
    create_fn = create_reader(FLAGS.model_type, phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    reader = create_fn(batch_size=batch_size,
                       subset=subset,
                       version=FLAGS.test_version)

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = ContrastQuestionSampler(model_config)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')

    for i in range(num_batches):
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        c_ans, c_ans_len, pathes, scores = model.greedy_inference(
            outputs[:-2], sess)
        scores, pathes = post_process_prediction(scores, pathes)

        # Number of sampled questions to display per example.
        k = 3
        capt, capt_len = outputs[2:4]

        # Ground-truth question tokens of the first example, unpadded.
        gt = capt[0, :capt_len[0]]
        print('gt: %s [%s]' %
              (to_sentence.index_to_question(gt),
               to_sentence.index_to_answer(c_ans[0, :c_ans_len[0]])))
        for ix in range(k):
            question = to_sentence.index_to_question(pathes[ix])
            answer = to_sentence.index_to_answer(c_ans[ix, :c_ans_len[ix]])
            print('%s %d: %s [%s]' %
                  ('pre' if ix == 0 else 'cst', ix, question, answer))
        # Deliberate breakpoint: pause after every batch for inspection.
        import pdb
        pdb.set_trace()
Example #8
0
def ivqa_decoding_beam_search(checkpoint_path=None):
    """Mine ill-formed ("negative") generated questions.

    Samples questions per (image, answer) pair, scores them with a
    language model, and collects serialized paths whose legality score
    is below 0.2 (paths matching a ground-truth question are exempt)
    until 100k are gathered or the data runs out. The list is saved to
    'data/lm_init_neg_pathes.json'.

    NOTE(review): `res_file` and `score_file` are computed but never
    used in this function.
    """
    model_config = ModelConfig()
    method = FLAGS.method
    res_file = 'result/bs_gen_%s.json' % method
    score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'kptrain'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.ex
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(5)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

    num_batches = reader.num_batches

    print('Running beam search inference...')

    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    neg_pathes = []
    need_stop = False
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # 2000 appears to be the out-of-vocabulary answer label -- skip
        # such examples (TODO confirm against the answer vocabulary).
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len],
                                                sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model(
            [ivqa_pathes], pad_token=model.pad_token - 1, max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        # Paths matching a ground-truth question are always legal.
        legality_scores[match_gt] = 1.0

        neg_inds = np.where(legality_scores < 0.2)[0]
        for idx in neg_inds:
            # Drop the leading token before serializing the path.
            ser_neg = serialize_path(ivqa_pathes[idx][1:])
            neg_pathes.append(ser_neg)
            if len(neg_pathes) > 100000:
                need_stop = True
                break
            # if len(neg_pathes) > 1000:
            #     need_stop = True
            #     break
            # print('Neg size: %d' % len(neg_pathes))
        if need_stop:
            break
    sv_file = 'data/lm_init_neg_pathes.json'
    save_json(sv_file, neg_pathes)
def train():
    """Build the 'V7W-VarDS' training pipeline and run the training loop.

    Creates the training directory, the model, an exponentially decayed
    learning rate, the optimizer, a Saver and merged summaries, then
    hands control to training_util.train with the data reader.
    """
    model_config = ModelConfig()
    training_config = TrainConfig()
    # model_config.batch_size = 8

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader('V7W-VarDS', phase='train')

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.trainset, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.build()

        # Set up the learning rate.u
        learning_rate = tf.constant(training_config.initial_learning_rate)

        def _learning_rate_decay_fn(learn_rate, global_step):
            # Smooth (non-staircase) exponential decay.
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Setup summaries
        summary_op = tf.summary.merge_all()

    # create reader
    reader = reader_fn(
        batch_size=model_config.batch_size,
        subset=FLAGS.trainset,  # 'kptrain'
        version='v1')

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        feed_fn=model.fill_feed_dict,
                        summary_op=summary_op)
Example #10
0
def ivqa_decoding_beam_search(checkpoint_path=None, subset=FLAGS.subset):
    """Generate multiple question variants per example and collect them
    with augmented question ids.

    Uses sampling mode for variational models (FLAGS.use_var) and beam
    mode otherwise. Each kept variant gets the id
    `question_id * 1000 + variant_index`.

    Args:
        checkpoint_path: checkpoint to restore; if None, the newest one
            in the configured checkpoint directory is used.
        subset: data subset to read (default is FLAGS.subset, evaluated
            at import time).

    NOTE(review): `results`, `extend_questions` and
    `extended_question_ids` are accumulated but not written out in this
    snippet -- the saving step may have been trimmed.
    """
    model_config = ModelConfig()
    _model_suffix = 'var_' if FLAGS.use_var else ''
    res_file = 'data_rl/%sivqa_%s_questions.json' % (_model_suffix,
                                                     FLAGS.subset)
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-Var', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    batch_size = 64
    reader = create_fn(batch_size=batch_size,
                       subset=subset,
                       version=FLAGS.test_version)

    if checkpoint_path is None:
        if FLAGS.use_var:  # variational models
            ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        else:  # standard models
            ckpt_dir = FLAGS.checkpoint_dir % ('kprestval', FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    mode = 'sampling' if FLAGS.use_var else 'beam'

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, mode)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    extend_questions = []
    extended_question_ids = []
    for i in range(num_batches):
        print('iter: %d/%d' % (i, num_batches))
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)
        scores, pathes = post_process_prediction(scores,
                                                 pathes,
                                                 add_start_end=False)

        # The decoder appears to emit num_sampled variants per example,
        # batch-major: variant k of sample s sits at index
        # k * batch_size + s (TODO confirm layout).
        _this_batch_size = quest_ids.shape[0]
        num_sampled = int(len(pathes) / _this_batch_size)
        _noise_offset = np.arange(0, num_sampled,
                                  dtype=np.int32) * _this_batch_size
        for _s_id in range(_this_batch_size):
            _index = _noise_offset + _s_id
            try:
                cur_scores = [scores[_idx] for _idx in _index]
                cur_pathes = [pathes[_idx] for _idx in _index]
            # Fixed: `except Exception, e:` is Python-2-only syntax and a
            # SyntaxError on Python 3; `as` works on both.
            except Exception as e:
                print(str(e))
                import pdb  # local import so the handler cannot NameError
                pdb.set_trace()

            cur_scores, cur_pathes = find_unique_pathes(cur_scores, cur_pathes)
            question_id = int(quest_ids[_s_id])
            image_id = image_ids[_s_id]

            for _pid, path in enumerate(cur_pathes):
                sentence = to_sentence.index_to_question(path)
                extended_question_ids.append([question_id, _pid])
                # Encode the variant index into the question id.
                aug_quest_id = question_id * 1000 + _pid
                res_i = {
                    'image_id': int(image_id),
                    'question_id': aug_quest_id,
                    'question': sentence
                }
                results.append(res_i)
            extend_questions += cur_pathes
def test(checkpoint_path=None):
    """Evaluate the re-rank VQA model on the kp<testset> split.

    Saves the raw re-rank/VQA scores and candidate labels to
    'data/rerank_kptest.h5' and the predicted answers to the configured
    JSON result file.

    Returns:
        (res_file, quest_ids): path of the JSON results and the
        concatenated array of evaluated question ids.
    """
    batch_size = 40
    config = ModelConfig()
    config.convert = True
    config.ivqa_rerank = True  # VQA baseline or re-rank
    config.loss_type = FLAGS.loss_type
    # Get model function
    model_fn = get_model_creation_fn(FLAGS.model_type)
    # ana_ctx = RerankAnalysiser()

    # build data reader
    reader_fn = create_reader(FLAGS.model_type, phase='test')
    reader = reader_fn(batch_size=batch_size, subset='kp%s' % FLAGS.testset,
                       version=FLAGS.version)
    if checkpoint_path is None:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir % (FLAGS.version,
                                                                     FLAGS.model_type))
        checkpoint_path = ckpt.model_checkpoint_path
    print(checkpoint_path)

    # build and restore model
    model = model_fn(config, phase='evaluate')
    model.build()
    # prob = model.prob

    sess = tf.Session(graph=tf.get_default_graph())
    tf.logging.info('Restore from model %s' % os.path.basename(checkpoint_path))
    if FLAGS.restore:
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
    else:
        sess.run(tf.initialize_all_variables())
        model.init_fn(sess)

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    ans_ids = []
    quest_ids = []

    b_rerank_scores = []
    b_vqa_scores = []
    b_cand_labels = []
    print('Running inference on split %s...' % FLAGS.testset)
    for i in range(reader.num_batches):
        if i % 10 == 0:
            update_progress(i / float(reader.num_batches))
        outputs = reader.get_test_batch()
        model_preds = model.inference_rerank_vqa(outputs[:4], sess)
        # NOTE(review): the same 5-tuple is unpacked twice below; only
        # `top_ans` from the first unpack is used afterwards.
        score, top_ans, _, _, _ = model_preds
        ivqa_score, ivqa_top_ans, ivqa_scores, vqa_top_ans, vqa_scores = model_preds
        b_rerank_scores.append(ivqa_scores)
        b_vqa_scores.append(vqa_scores)
        b_cand_labels.append(vqa_top_ans)
        # if i > 100:
        #     break
        # ana_ctx.update(outputs, model_preds)

        ans_ids.append(top_ans)
        quest_id = outputs[-2]
        quest_ids.append(quest_id)
    # save preds
    b_rerank_scores = np.concatenate(b_rerank_scores, axis=0)
    b_vqa_scores = np.concatenate(b_vqa_scores, axis=0)
    b_cand_labels = np.concatenate(b_cand_labels, axis=0)
    quest_ids = np.concatenate(quest_ids)
    from util import save_hdf5
    save_hdf5('data/rerank_kptest.h5', {'ivqa': b_rerank_scores,
                                         'vqa': b_vqa_scores,
                                         'cands': b_cand_labels,
                                         'quest_ids': quest_ids})

    # ana_ctx.compute_accuracy()

    ans_ids = np.concatenate(ans_ids)
    result = [{u'answer': to_sentence.index_to_top_answer(aid),
               u'question_id': qid} for aid, qid in zip(ans_ids, quest_ids)]

    # save results
    tf.logging.info('Saving results')
    res_file = FLAGS.result_format % (FLAGS.version, FLAGS.testset)
    json.dump(result, open(res_file, 'w'))
    tf.logging.info('Done!')
    tf.logging.info('#Num eval samples %d' % len(result))
    # ana_ctx.close()
    return res_file, quest_ids
Example #12
0
def ivqa_decoding_beam_search(checkpoint_path=None, subset='kptest'):
    """Sampling-beam question generation.

    In 'full' mode every unique sampled question is kept with an
    augmented id (`question_id * 1000 + variant`); otherwise one question
    per example is chosen via pick_question. Results are written to JSON.

    Args:
        checkpoint_path: checkpoint to restore; if None, the newest one
            in the configured checkpoint directory is used.
        subset: name of the data subset to evaluate on.

    Returns:
        Path of the JSON result file that was written.
    """
    model_config = ModelConfig()
    res_file = 'result/var_vaq_beam_%s_%s.json' % (FLAGS.model_type.upper(),
                                                   FLAGS.mode)
    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader(FLAGS.model_type, phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    reader = create_fn(batch_size=50,
                       subset=subset,
                       version=FLAGS.test_version)

    if checkpoint_path is None:
        ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'sampling_beam')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        print('iter: %d/%d' % (i, num_batches))
        # if i >= 10:
        #     break
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)

        # wrap inputs: replicate each sequence score across time steps so
        # post-processing sees one score per token.
        _this_batch_size = quest_ids.size
        seq_len = pathes.shape[1]
        dummy_scores = np.tile(scores[:, np.newaxis], [1, seq_len])
        # dummy_scores = np.zeros_like(pathes, dtype=np.float32)
        ivqa_scores, ivqa_pathes, ivqa_counts = post_process_variation_questions_with_count(
            dummy_scores, pathes, _this_batch_size)
        # scores, pathes = convert_to_unique_questions(scores, pathes)

        for _q_idx, (ps, scs, cs) in enumerate(
                zip(ivqa_pathes, ivqa_scores, ivqa_counts)):
            image_id = image_ids[_q_idx]
            question_id = int(quest_ids[_q_idx])
            if FLAGS.mode == 'full':
                # Keep every variant with an augmented question id.
                for _p_idx, p in enumerate(ps):
                    sentence = to_sentence.index_to_question(p)
                    aug_quest_id = question_id * 1000 + _p_idx
                    res_i = {
                        'image_id': int(image_id),
                        'question_id': aug_quest_id,
                        'question': sentence
                    }
                    results.append(res_i)
            else:
                # Keep a single question chosen from the variants.
                p = pick_question(scs, ps, cs)
                sentence = to_sentence.index_to_question(p)
                # print(sentence)
                res_i = {
                    'image_id': int(image_id),
                    'question_id': question_id,
                    'question': sentence
                }
                results.append(res_i)

    save_json(res_file, results)
    return res_file
Example #13
0
def ivqa_decoding_beam_search(checkpoint_path=None, subset='kpval'):
    """Qualitative inspection of sampled questions (no-image VarRL model).

    For up to 41 batches: samples questions, de-duplicates them, prints
    them alongside the ground-truth answer, and logs everything to an
    ExperimentWriter, which is rendered at the end.

    NOTE(review): the bare `return` after writer.render() makes the
    trailing save_json / `return res_file` unreachable -- likely a
    debugging leftover.
    """
    model_config = ModelConfig()
    res_file = 'result/quest_vaq_greedy_%s.json' % FLAGS.model_type.upper()
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-Var', phase='test')
    writer = ExperimentWriter('latex/examples_noimage_tmp')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader (note: the `subset` argument is overridden here)
    subset = 'kpval'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    if checkpoint_path is None:
        # ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        ckpt_dir = 'model/v1_var_att_noimage_cache_restval_VAQ-VarRL'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'sampling')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()

        # inference: the last two reader outputs are the ids; the rest
        # feed the model.
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)
        scores, pathes = post_process_prediction(scores, pathes)
        # Pad to a matrix, drop duplicate rows, then strip the leading
        # column before re-processing.
        pathes, pathes_len = put_to_array(pathes)
        scores, pathes = find_unique_rows(scores, pathes)
        scores, pathes = post_process_prediction(scores, pathes[:, 1:])
        # question = to_sentence.index_to_question(pathes[0])
        # print('%d/%d: %s' % (i, num_batches, question))

        # show image
        os.system('clear')
        im_file = '%s2014/COCO_%s2014_%012d.jpg' % ('val', 'val', image_ids[0])
        im_path = os.path.join(IM_ROOT, im_file)
        # im = imread(im_path)
        # plt.imshow(im)
        ans, ans_len = outputs[1:1 + 2]
        answers = extract_gt(ans, ans_len)
        answer = to_sentence.index_to_answer(answers[0])
        # plt.title(answer)

        print('Answer: %s' % answer)
        questions = []
        for path in pathes:
            sentence = to_sentence.index_to_question(path)
            questions.append(sentence)
            print(sentence)
        # plt.show()
        writer.add_result(image_ids[0], quest_ids[0], im_path, answer,
                          questions)

        for quest_id, image_id, path in zip(quest_ids, image_ids, pathes):
            sentence = to_sentence.index_to_question(path)
            res_i = {
                'image_id': int(image_id),
                'question_id': int(quest_id),
                'question': sentence
            }
            results.append(res_i)

        # Only inspect the first 41 batches.
        if i == 40:
            break

    writer.render()
    return

    # NOTE(review): unreachable -- see docstring.
    save_json(res_file, results)
    return res_file
Example #14
0
def train():
    """RL-style fine-tuning loop.

    Builds the model with a fixed 5e-5 learning rate (no decay), a
    bleu-reward IVQARewards environment, and delegates the loop to
    training_util.train.
    """
    model_config = ModelConfig()
    training_config = TrainConfig()
    model_config.convert = FLAGS.convert
    # model_config.batch_size = 2

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader(FLAGS.model_type, phase='train')

    # setup environment
    env = IVQARewards(metric='bleu')

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.build()

        # Set up the learning rate (constant, no decay schedule).
        learning_rate = tf.constant(5e-5)

        # def _learning_rate_decay_fn(learn_rate, global_step):
        #     return tf.train.exponential_decay(
        #         learn_rate,
        #         global_step,
        #         decay_steps=training_config.decay_step,
        #         decay_rate=training_config.decay_factor,
        #         staircase=False)
        #
        # learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=None,
            variables=model.model_vars)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

    # create reader
    reader = reader_fn(batch_size=16, subset='kptrain', version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        model,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        feed_fn=model.fill_feed_dict,
                        env=env)
Example #15
0
def test(checkpoint_path=None):
    """Beam-search question generation (beam size 10) over 'kptest'.

    Writes the best question per example to a result JSON and all beam
    candidates to a separate candidates JSON.

    Returns:
        Path of the JSON result file.

    NOTE(review): the progress line prints the global TEST_SET rather
    than the local `subset` -- confirm they agree.
    """
    config = ModelConfig()
    config.phase = 'other'
    config.model_type = FLAGS.model_type

    beam_size = 10
    subset = 'kptest'
    # build data reader
    create_fn = create_reader(FLAGS.model_type, phase='test')
    reader = create_fn(batch_size=1, subset=subset, version='v1')
    if checkpoint_path is None:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir % FLAGS.model_type)
        checkpoint_path = ckpt.model_checkpoint_path

    res_file = 'result/beamsearch_%s_%s.json' % (FLAGS.model_type.upper(), subset)
    cand_file = 'result/sampling_%s_%s.json' % (FLAGS.model_type.upper(), subset)

    # build and restore model
    model = InferenceWrapper()
    restore_fn = model.build_graph_from_config(config, checkpoint_path)

    sess = tf.Session(graph=tf.get_default_graph())
    tf.logging.info('Restore from model %s' % os.path.basename(checkpoint_path))
    restore_fn(sess)

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset=FLAGS.model_trainset)
    generator = caption_generator.CaptionGenerator(model, to_sentence.question_vocab,
                                                   beam_size=beam_size)

    results = []
    candidates = []
    print('Running inference on split %s...' % TEST_SET)
    num_batches = reader.num_batches
    for i in range(num_batches):
        outputs = reader.get_test_batch()
        im_feed, attr, ans_seq, ans_seq_len, quest_id, image_id = outputs

        image_id = int(image_id)
        quest_id = int(quest_id)
        # batch_size is 1, so drop the leading batch dimension.
        im_feed = np.squeeze(im_feed)
        captions = generator.beam_search(sess, [im_feed, attr, ans_seq, ans_seq_len])

        print('============== %d ============' % i)
        print('image id: %d, question id: %d' % (image_id, quest_id))
        # print('question\t: %s' % question)
        tmp = []
        vaq_cands_i = {'question_id': quest_id, 'image_id': image_id}
        for c, g in enumerate(captions):
            quest = to_sentence.index_to_question(g.sentence)
            tmp.append(quest)
            print('[%02d]: %s' % (c, quest))

        vaq_cands_i['candidates'] = tmp
        candidates.append(vaq_cands_i)

        # captions are ranked; the first is the best beam.
        caption = captions[0]
        sentence = to_sentence.index_to_question(caption.sentence)
        res_i = {'image_id': image_id, 'question_id': quest_id, 'question': sentence}
        results.append(res_i)
    save_json(res_file, results)
    save_json(cand_file, candidates)
    return res_file
Example #16
0
def ivqa_decoding_beam_search(checkpoint_path=None):
    """Sample question candidates, filter with a language model, score with VQA.

    Pipeline per test item: draw 1000 question samples from a variational
    iVQA model conditioned on (image, answer), deduplicate them, filter with
    a neural language model (candidates matching a ground-truth question are
    always kept), then score the survivors with a VQA model. Surviving
    candidates with their VQA scores are written to JSON; per-batch mean VQA
    scores are saved to a .mat file.

    Args:
        checkpoint_path: explicit checkpoint to restore; when None the newest
            checkpoint in the configured directory is used.

    Returns:
        Tuple of (path of the result JSON file, mean VQA score over batches).
    """
    model_config = ModelConfig()
    method = FLAGS.method
    res_file = 'result/bs_gen_%s.json' % method
    score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'kptest'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    # Exemplar model used to detect candidates that exactly match a GT question.
    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            # NOTE(review): 'checkpoint_pat' looks like a typo of
            # 'checkpoint_path'/'checkpoint_dir' — confirm the flag name exists.
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the sampling model and restore its weights.
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(1000)  # samples drawn per test item
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
        vqa_model = VQAWrapper(g, sess)
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    batch_vqa_scores = []

    # max_iters > 0 caps the number of batches for quick runs.
    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        # Label 2000 presumably marks an answer outside the top-2000
        # vocabulary (OOV bucket) — skip those items. TODO confirm.
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len],
                                                sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model(
            [ivqa_pathes], pad_token=model.pad_token - 1, max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        # Candidates matching a ground-truth question are always legal.
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.1).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        # for idx in valid_inds:
        #     path = ivqa_pathes[idx]
        #     sc = legality_scores[idx]
        #     sentence = to_sentence.index_to_question(path)
        #     # questions.append(sentence)
        #     print('%s (%0.3f)' % (sentence, sc))

        # apply  VQA model
        sampled = [ivqa_pathes[_idx] for _idx in valid_inds]
        # vqa_scores = vqa_model.get_scores(sampled, image_id, top_ans)
        vqa_scores, is_valid = vqa_model.get_scores(sampled, im, top_ans)
        # conf_inds = (-vqa_scores).argsort()[:20]
        conf_inds = np.where(is_valid)[0]
        # pdb.set_trace()
        # conf_inds = (-vqa_scores).argsort()[:40]

        t4 = time()
        print('Time for VQA verification: %0.2fs' % (t4 - t3))

        this_mean_vqa_score = vqa_scores[conf_inds].mean()
        print('sampled: %d, unique: %d, legal: %d, gt: %d, mean score %0.2f' %
              (pathes.shape[0], len(ivqa_pathes), num_keep, match_gt.sum(),
               this_mean_vqa_score))
        batch_vqa_scores.append(this_mean_vqa_score)

        for _pid, idx in enumerate(conf_inds):
            path = sampled[idx]
            sc = vqa_scores[idx]
            sentence = to_sentence.index_to_question(path)
            # Unique id per candidate: original question id * 1000 + index.
            aug_quest_id = question_id * 1000 + _pid
            res_i = {
                'image_id': int(image_id),
                'question_id': aug_quest_id,
                'question': sentence,
                'score': float(sc)
            }
            results.append(res_i)

    save_json(res_file, results)
    batch_vqa_scores = np.array(batch_vqa_scores, dtype=np.float32)
    mean_vqa_score = batch_vqa_scores.mean()
    from scipy.io import savemat
    savemat(score_file, {
        'scores': batch_vqa_scores,
        'mean_score': mean_vqa_score
    })
    print('BS mean VQA score: %0.3f' % mean_vqa_score)
    return res_file, mean_vqa_score
Пример #17
0
def ivqa_decoding_beam_search(checkpoint_path=None, subset='kpval'):
    """Compute 1-vs-all CIDEr scores between two batches of GT questions.

    Each iteration reads two consecutive batches from the test reader: the
    first supplies ground-truth questions, the second a comparison set, and
    ``compute_cider_token_1vsall`` is evaluated on the comparison batch's
    token strings.

    Fix: the loop previously stopped unconditionally at a leftover
    ``pdb.set_trace()`` breakpoint on every iteration; the breakpoint is
    removed so the function can run unattended, and the progress print that
    was already drafted in comments is enabled instead.

    Args:
        checkpoint_path: unused; kept for signature compatibility with the
            sibling decoding functions.
        subset: data subset passed to the reader factory.

    Returns:
        Path of the result JSON file (per-question result collection is
        currently disabled, so the file holds an empty list).
    """
    res_file = 'result/quest_vaq_greedy_%s.json' % FLAGS.model_type.upper()
    # Get data reader for the configured model type.
    create_fn = create_reader(FLAGS.model_type, phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    reader = create_fn(batch_size=80,
                       subset=subset,
                       version=FLAGS.test_version)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()

        # First batch: ground-truth questions.
        im, capt, capt_len, ans_seq, ans_seq_len, quest_ids, image_ids = outputs
        # Second batch: comparison questions.
        _, res, res_len, _, _, _, _ = reader.get_test_batch()

        # Decode ground-truth questions as sentences and token strings.
        pathes = parse_gt_questions(capt, capt_len)
        question = to_sentence.index_to_question(pathes[0])
        gts = [to_sentence.index_to_question(q) for q in pathes]
        gts_token = [' '.join([str(t) for t in path]) for path in pathes]

        # Decode comparison questions the same way.
        respathes = parse_gt_questions(res, res_len)
        res = [to_sentence.index_to_question(q) for q in respathes]
        res_token = [' '.join([str(t) for t in path]) for path in respathes]

        scores = compute_cider_token_1vsall(quest_ids, res_token)
        print('%d/%d: %s' % (i, num_batches, question))

        # for quest_id, image_id, path in zip(quest_ids, image_ids, pathes):
        #     sentence = to_sentence.index_to_question(path)
        #     res_i = {'image_id': int(image_id), 'question_id': int(quest_id), 'question': sentence}
        #     results.append(res_i)

    save_json(res_file, results)
    return res_file
Пример #18
0
def train():
    """Train a question-generation model with a mixed RL-style reward.

    Builds the model (initialised from a pretrained variational-iVQA
    checkpoint) and an attention-VQA MixReward environment (CIDEr enabled,
    winner-take-all diversity, language-legality threshold 1/3), then hands
    everything to the project's training loop.
    """
    model_config = ModelConfig()
    training_config = TrainConfig()

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader('VAQ-EpochAtt', phase='train')

    # Reward environment configuration.
    env = MixReward(attention_vqa=True)
    env.set_cider_state(use_cider=True)
    env.diversity_reward.mode = 'winner_take_all'
    env.set_language_thresh(1.0 / 3.0)
    # env.set_replay_buffer(insert_thresh=0.1,
    #                       sv_dir='vqa_replay_buffer/low_att')  # if 0.5, already fooled others

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)
    # The last path component names the language-model cache directory.
    ckpt_suffix = train_dir.split('/')[-1]

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.set_init_ckpt(
            'model/v1_var_ivqa_restvalr2_VAQ-Var/model.ckpt-374000')
        model.build()

        # Set up the learning rate (scaled down by 10x for fine-tuning).
        learning_rate = tf.constant(training_config.initial_learning_rate *
                                    0.1)

        def _learning_rate_decay_fn(learn_rate, global_step):
            # Smooth (non-staircase) exponential decay schedule.
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Setup summaries
        summary_op = tf.summary.merge_all()

        # Setup language model for the reward environment.
        lm = LanguageModel()
        lm.build()
        lm.set_cache_dir(ckpt_suffix)
        env.set_language_model(lm)

    # create reader
    reader = reader_fn(
        batch_size=16,
        subset='kprestval',  # 'kptrain'
        version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        model=model,
                        summary_op=summary_op,
                        env=env)
def ivqa_decoding_beam_search(checkpoint_path=None, subset='kptest'):
    """Generate augmented question variations with a sampling-beam model.

    For each test item, samples question candidates conditioned on the
    remaining inputs, post-processes them into unique variations (with
    occurrence counts), and writes every variation — including its token
    indices and probability — to a JSON file.

    Args:
        checkpoint_path: explicit checkpoint to restore; when None the
            newest checkpoint in the configured directory is used.
        subset: data subset passed to the reader factory.

    Returns:
        Path of the saved result JSON file.
    """
    model_config = ModelConfig()
    res_file = 'result/aug_var_vaq_kl0_greedy_%s.json' % FLAGS.model_type.upper(
    )
    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader('VAQ-Var', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    if checkpoint_path is None:
        ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model in sampling-beam mode.
        model = model_fn(model_config, 'sampling_beam')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        print('iter: %d/%d' % (i, num_batches))
        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        scores, pathes = model.greedy_inference(outputs[:-2], sess)
        # Broadcast the per-sequence score to every token position.
        scores = np.tile(scores[:, np.newaxis], [1, pathes.shape[1]])
        # scores, pathes = post_process_prediction(scores, pathes)

        _ntot = len(pathes)  # number of samples before deduplication
        scores, pathes, ivqa_counts = post_process_variation_questions_with_count(
            scores, pathes, 1)

        question_id = int(quest_ids[0])
        image_id = image_ids[0]

        # Report unique variations vs. total samples.
        print('%d/%d' % (len(pathes[0]), _ntot))
        for _p_idx, (path, sc) in enumerate(zip(pathes[0], scores[0])):
            sentence = to_sentence.index_to_question(path)
            # Unique id per variation: original id * 1000 + variation index.
            aug_quest_id = question_id * 1000 + _p_idx
            # res_i = {'image_id': int(image_id),
            #          'question_id': aug_quest_id,
            #          'question': sentence}
            res_i = {
                'image_id': int(image_id),
                'question_id': aug_quest_id,
                'question': sentence,
                'question_inds': path,
                'counts': len(pathes),
                'probs': float(sc)
            }
            results.append(res_i)

    save_json(res_file, results)
    return res_file
Пример #20
0
def var_vqa_decoding_beam_search(checkpoint_path=None, subset='kpval'):
    """Visualise sampled Visual7W answers and log them for review.

    Runs greedy sampling conditioned on (image, question), deduplicates the
    decoded paths, prints the ground-truth question/answer next to each
    sampled answer, and records everything in an ExperimentWriter latex
    report. Stops after 40 batches and renders the report.

    Args:
        checkpoint_path: explicit checkpoint; when None the newest
            checkpoint in the configured directory is restored.
        subset: NOTE(review) — effectively ignored: it is overwritten with
            'val' below; confirm whether the parameter should be honoured.

    Returns:
        None (output goes to the ExperimentWriter report).
    """
    model_config = ModelConfig()
    # NOTE(review): res_file and the `results` list are accumulated but
    # never saved in this function — presumably leftover from a sibling.
    res_file = 'result/quest_vaq_greedy_%s.json' % FLAGS.model_type.upper()
    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader('V7W-VarDS', phase='test')
    writer = ExperimentWriter('latex/v7w_%s' % FLAGS.model_type.lower())

    # Create the vocabulary (Visual7W-specific vocab files).
    to_sentence = SentenceGenerator(
        trainset='train',
        ans_vocab_file='data2/v7w_train_answer_word_counts.txt',
        quest_vocab_file='data2/v7w_train_question_word_counts.txt',
        top_ans_file='data2/v7w_train_top2000_answers.txt')

    # get data reader
    subset = 'val'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    if checkpoint_path is None:
        ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.trainset, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model in sampling mode.
        model = model_fn(model_config, 'sampling')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()
        # pdb.set_trace()

        # inference
        images, quest, quest_len, ans, ans_len, quest_ids, image_ids = outputs
        scores, pathes = model.greedy_inference([images, quest, quest_len],
                                                sess)
        # Deduplicate sampled paths: trim, pad to array, keep unique rows,
        # then trim again (dropping the leading token column).
        scores, pathes = post_process_prediction(scores, pathes)
        pathes, pathes_len = put_to_array(pathes)
        scores, pathes = find_unique_rows(scores, pathes)
        scores, pathes = post_process_prediction(scores, pathes[:, 1:])
        # question = to_sentence.index_to_question(pathes[0])
        # print('%d/%d: %s' % (i, num_batches, question))

        # show image
        os.system('clear')
        image_id = image_ids[0]
        im_path = _get_vg_image_root(image_id)
        # im = imread(im_path)
        # plt.imshow(im)
        questions = extract_gt(quest, quest_len)
        question = to_sentence.index_to_question(questions[0])
        print('Question: %s' % question)

        answers = extract_gt(ans, ans_len)
        answer = to_sentence.index_to_answer(answers[0])
        # plt.title(answer)

        print('Answer: %s' % answer)
        # Decode every unique sampled path as an answer string.
        answers = []
        for path in pathes:
            sentence = to_sentence.index_to_answer(path)
            answers.append(sentence)
            print(sentence)
        # plt.show()
        qa = '%s - %s' % (question, answer)
        writer.add_result(image_ids[0], quest_ids[0], im_path, qa, answers)

        for quest_id, image_id, path in zip(quest_ids, image_ids, pathes):
            sentence = to_sentence.index_to_question(path)
            res_i = {
                'image_id': int(image_id),
                'question_id': int(quest_id),
                'question': sentence
            }
            results.append(res_i)

        # Visualisation run: stop after 40 batches.
        if i == 40:
            break

    writer.render()
    return
def ivqa_decoding_beam_search(checkpoint_path=None):
    """Dump question candidates for a hand-picked set of visualisation items.

    Samples 5000 question candidates for three selected question ids,
    filters them with the neural language model (candidates matching a
    ground-truth question are always kept), and writes each surviving
    candidate together with the decoded ground-truth question and answer
    to a JSON file.

    Args:
        checkpoint_path: explicit checkpoint; when None the newest
            checkpoint in the configured directory is restored.

    Returns:
        None (results are written to res_file).
    """
    model_config = ModelConfig()
    method = FLAGS.method  # NOTE(review): read but unused in this function.
    res_file = 'result/bs_cand_for_vis.json'
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval',
                                    top_ans_file='../VQA-tensorflow/data/vqa_trainval_top2000_answers.txt')

    # get data reader
    subset = 'kpval'
    reader = create_fn(batch_size=1, subset=subset,
                       version=FLAGS.test_version)

    # Exemplar model used to detect candidates that exactly match a GT question.
    exemplar = ExemplarLanguageModel()

    if checkpoint_path is None:
        if FLAGS.checkpoint_dir:
            ckpt_dir = FLAGS.checkpoint_dir
        else:
            # NOTE(review): 'checkpoint_pat' looks like a typo of
            # 'checkpoint_path'/'checkpoint_dir' — confirm the flag name.
            ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the sampling model and restore its weights.
        model = model_fn(model_config, 'sampling')
        model.set_num_sampling_points(5000)  # samples drawn per item
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
    # vqa_model = N2MNWrapper()
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    # Only these question ids are processed; all others are skipped.
    quest_ids_to_vis = {5682052: 'bread',
                        965492: 'plane',
                        681282: 'station'}

    print('Running beam search inference...')
    results = []
    batch_vqa_scores = []

    # max_iters > 0 caps the number of batches for quick runs.
    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):

        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        quest_id_key = int(quest_ids)

        if quest_id_key not in quest_ids_to_vis:
            continue
        # pdb.set_trace()

        im, gt_q, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        # Label 2000 presumably marks an out-of-vocabulary answer — skip.
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len], sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model([ivqa_pathes],
                                                                pad_token=model.pad_token - 1,
                                                                max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        # Candidates matching a ground-truth question are always legal.
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.1).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]
        print('keep: %d/%d' % (num_keep, len(ivqa_pathes)))

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        def token_arr_to_list(arr):
            # Flatten a token array into a plain Python list.
            return arr.flatten().tolist()

        for _pid, idx in enumerate(valid_inds):
            path = ivqa_pathes[idx]
            # sc = vqa_scores[idx]
            sentence = to_sentence.index_to_question(path)
            # Unique id per candidate: original question id * 1000 + index.
            aug_quest_id = question_id * 1000 + _pid
            res_i = {'image_id': int(image_id),
                     'aug_id': aug_quest_id,
                     'question_id': question_id,
                     'target': sentence,
                     'top_ans_id': int(top_ans),
                     'question': to_sentence.index_to_question(token_arr_to_list(gt_q)),
                     'answer': to_sentence.index_to_answer(token_arr_to_list(ans_tokens))}
            results.append(res_i)

    save_json(res_file, results)
    return None
# Script-level setup: build and initialise a VAQ-VarRL model, reward
# environments, a training data reader, and a grammar checker.
model_fn = get_model_creation_fn('VAQ-VarRL')
model = model_fn()

model.build()
sess = tf.Session()
print('Init model')
model.init_fn(sess)

# Vocabulary used to decode token indices back into sentences.
to_sentence = SentenceGenerator(trainset='trainval')

env = IVQARewards()
# env1 = IVQARewards(subset='kprestval')
env1 = VQARewards(ckpt_file='model/kprestval_VQA-BaseNorm/model.ckpt-26000')

create_fn = create_reader('VAQ-Var', phase='train')
reader = create_fn(batch_size=100, subset='kpval', version='v1')
reader.start()

# Grammar checker used to validate generated questions.
import grammar_check

tool = grammar_check.LanguageTool('en-US')


def process_questions(sentences):
    """Grammar-check each token sequence; True means no issues were found."""
    def _passes_grammar(token_seq):
        # Decode the indices to text, normalise possessives ("dog 's" ->
        # "dog's") and capitalise before running the grammar checker.
        text = to_sentence.index_to_question(token_seq)
        text = text.replace(" 's", "'s").capitalize()
        # tool.check returns a list of matches; an empty list means the
        # sentence is grammatical.
        return not tool.check(text)

    return [_passes_grammar(seq) for seq in sentences]
def var_vqa_decoding_beam_search(checkpoint_path=None, subset='kpval'):
    """Sample answers per question and evaluate answer recall.

    Runs greedy sampling conditioned on (image, question), deduplicates the
    decoded paths, decodes them as answer strings, and feeds the
    per-question answer lists to eval_recall.

    Args:
        checkpoint_path: explicit checkpoint; when None the newest
            checkpoint in the configured directory is restored.
        subset: NOTE(review) — effectively ignored: it is overwritten with
            'kpval' below; confirm whether the parameter should be honoured.

    Returns:
        None (recall is computed and reported by eval_recall).
    """
    model_config = ModelConfig()
    # NOTE(review): res_file is built but never used in this function.
    res_file = 'result/quest_vaq_greedy_%s.json' % FLAGS.model_type.upper()
    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    create_fn = create_reader(FLAGS.model_type, phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'kpval'
    reader = create_fn(batch_size=1, subset=subset, version=FLAGS.test_version)

    if checkpoint_path is None:
        ckpt_dir = FLAGS.checkpoint_dir % (FLAGS.version, FLAGS.model_type)
        # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
        ckpt = tf.train.get_checkpoint_state(ckpt_dir)
        checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model in sampling mode.
        model = model_fn(model_config, 'sampling')
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

    num_batches = reader.num_batches

    print('Running beam search inference...')
    results = []
    for i in range(num_batches):
        outputs = reader.get_test_batch()
        # pdb.set_trace()
        if i % 100 == 0:
            print('batch: %d/%d' % (i, num_batches))

        # inference
        images, quest, quest_len, ans, ans_len, quest_ids, image_ids = outputs
        scores, pathes = model.greedy_inference([images, quest, quest_len],
                                                sess)
        # Deduplicate sampled paths: trim, pad to array, keep unique rows,
        # then trim again (dropping the leading token column).
        scores, pathes = post_process_prediction(scores, pathes)
        pathes, pathes_len = put_to_array(pathes)
        scores, pathes = find_unique_rows(scores, pathes)
        scores, pathes = post_process_prediction(scores, pathes[:, 1:])
        # question = to_sentence.index_to_question(pathes[0])
        # print('%d/%d: %s' % (i, num_batches, question))

        # Decode every unique path as an answer string.
        answers = []
        for path in pathes:
            sentence = to_sentence.index_to_answer(path)
            answers.append(sentence)
            # print(sentence)

        res_i = {'question_id': int(quest_ids[0]), 'answers': answers}
        results.append(res_i)

    eval_recall(results)
    return
def ivqa_decoding_beam_search(ckpt_dir, method):
    """Generate per-question candidate lists with language-model scores.

    Samples (or beam-searches, depending on the hard-coded `inf_type`)
    question candidates conditioned on (image, answer), filters them with
    the neural language model (candidates matching a ground-truth question
    are always kept), and stores the survivors with their legality scores,
    grouped by question id. Skips entirely if the output file exists.

    Args:
        ckpt_dir: directory holding the model checkpoint to restore.
        method: tag used to name the output file.

    Returns:
        None (results are saved to JSON; returns early on skip).
    """
    model_config = ModelConfig()
    # Inference mode is hard-coded; 'rand' draws samples, 'beam' beam-searches.
    inf_type = 'beam'
    assert (inf_type in ['beam', 'rand'])
    # method = FLAGS.method
    if inf_type == 'rand':
        res_file = 'result/bs_RL2_cands_LM_%s.json' % method
    else:
        res_file = 'result/bs_RL2_cands_LM_%s_BEAM.json' % method
    if os.path.exists(res_file):
        print('File %s already exist, skipped' % res_file)
        return
    # score_file = 'result/bs_vqa_scores_%s.mat' % method
    # Get model
    model_fn = get_model_creation_fn('VAQ-Var')
    create_fn = create_reader('VAQ-VVIS', phase='test')

    # Create the vocabulary.
    to_sentence = SentenceGenerator(trainset='trainval')

    # get data reader
    subset = 'bs_test'
    reader = create_fn(batch_size=1, subset=subset,
                       version=FLAGS.test_version)

    # Exemplar model used to detect candidates that exactly match a GT question.
    exemplar = ExemplarLanguageModel()

    # if checkpoint_path is None:
    #     if FLAGS.checkpoint_dir:
    #         ckpt_dir = FLAGS.checkpoint_dir
    #     else:
    #         ckpt_dir = FLAGS.checkpoint_pat % (FLAGS.version, FLAGS.model_type)
    # ckpt_dir = '/import/vision-ephemeral/fl302/models/v2_kpvaq_VAQ-RL/'
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    checkpoint_path = ckpt.model_checkpoint_path

    # Build model
    g = tf.Graph()
    with g.as_default():
        # Build the model in the selected inference mode.
        if inf_type == 'rand':
            model = model_fn(model_config, 'sampling')
            model.set_num_sampling_points(1000)
        else:
            model = model_fn(model_config, 'sampling_beam')
            model.set_num_sampling_points(1000)
        model.build()
        # Restore from checkpoint
        restorer = Restorer(g)
        sess = tf.Session()
        restorer.restore(sess, checkpoint_path)

        # build language model
        language_model = LanguageModel()
        language_model.build()
        language_model.set_cache_dir('test_empty')
        # language_model.set_cache_dir('v1_var_att_lowthresh_cache_restval_VAQ-VarRL')
        language_model.set_session(sess)
        language_model.setup_model()

        # build VQA model
    # vqa_model = N2MNWrapper()
    # vqa_model = MLBWrapper()
    num_batches = reader.num_batches

    print('Running beam search inference...')
    # Keyed by question id; each value is the list of candidate dicts.
    results = {}
    # batch_vqa_scores = []

    # max_iters > 0 caps the number of batches for quick runs.
    num = FLAGS.max_iters if FLAGS.max_iters > 0 else num_batches
    for i in range(num):
        outputs = reader.get_test_batch()

        # inference
        quest_ids, image_ids = outputs[-2:]
        im, _, _, top_ans, ans_tokens, ans_len = outputs[:-2]
        # pdb.set_trace()
        # Label 2000 presumably marks an out-of-vocabulary answer — skip.
        if top_ans == 2000:
            continue

        print('\n%d/%d' % (i, num))
        question_id = int(quest_ids[0])
        image_id = int(image_ids[0])

        t1 = time()
        pathes, scores = model.greedy_inference([im, ans_tokens, ans_len], sess)

        # find unique
        ivqa_scores, ivqa_pathes = process_one(scores, pathes)
        t2 = time()
        print('Time for sample generation: %0.2fs' % (t2 - t1))

        # apply language model
        language_model_inputs = wrap_samples_for_language_model([ivqa_pathes],
                                                                pad_token=model.pad_token - 1,
                                                                max_length=20)
        match_gt = exemplar.query(ivqa_pathes)
        legality_scores = language_model.inference(language_model_inputs)
        # Candidates matching a ground-truth question are always legal.
        legality_scores[match_gt] = 1.0
        num_keep = max(100, (legality_scores > 0.3).sum())  # no less than 100
        valid_inds = (-legality_scores).argsort()[:num_keep]

        t3 = time()
        print('Time for language model filtration: %0.2fs' % (t3 - t2))

        # for idx in valid_inds:
        #     path = ivqa_pathes[idx]
        #     sc = legality_scores[idx]
        #     sentence = to_sentence.index_to_question(path)
        #     # questions.append(sentence)
        #     print('%s (%0.3f)' % (sentence, sc))

        # apply  VQA model
        sampled = [ivqa_pathes[_idx] for _idx in valid_inds]
        legality_scores = legality_scores[valid_inds]

        result_key = int(question_id)
        tmp = []
        for idx, path in enumerate(sampled):
            # path = sampled[idx]
            sc = legality_scores[idx]
            sentence = to_sentence.index_to_question(path)
            # aug_quest_id = question_id * 1000 + _pid
            res_i = {'image_id': int(image_id),
                     'aug_id': idx,
                     'question_id': question_id,
                     'question': sentence,
                     'score': float(sc)}
            tmp.append(res_i)
        print('Number of unique questions: %d' % len(tmp))
        results[result_key] = tmp

    save_json(res_file, results)
Пример #25
0
def train():
    """Train a question-generation model with RL-style mixed rewards.

    Configures a MixReward environment with CIDEr disabled, winner-take-all
    diversity, and a language-legality threshold of 0.2, builds the model,
    optimizer, saver, summaries and a language model for the reward
    environment, then runs the project's training loop.
    """
    model_config = ModelConfig()
    training_config = TrainConfig()

    # Get model
    model_fn = get_model_creation_fn(FLAGS.model_type)
    reader_fn = create_reader('VAQ-Var', phase='train')

    # Reward environment configuration.
    env = MixReward()
    env.diversity_reward.mode = 'winner_take_all'
    env.set_cider_state(False)
    env.set_language_thresh(0.2)

    # Create training directory.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the model.
        model = model_fn(model_config, 'train')
        model.build()

        # Set up the learning rate (scaled down by 10x for fine-tuning).
        learning_rate = tf.constant(training_config.initial_learning_rate *
                                    0.1)

        def _learning_rate_decay_fn(learn_rate, global_step):
            # Smooth (non-staircase) exponential decay schedule.
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        learning_rate_decay_fn = _learning_rate_decay_fn

        train_op = tf.contrib.layers.optimize_loss(
            loss=model.loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=learning_rate_decay_fn)

        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Setup summaries
        summary_op = tf.summary.merge_all()

        # Setup language model for the reward environment.
        lm = LanguageModel()
        lm.build()
        env.set_language_model(lm)

    # create reader
    reader = reader_fn(
        batch_size=16,
        subset='kprestval',  # 'kptrain'
        version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=model.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=model.init_fn,
                        saver=saver,
                        reader=reader,
                        model=model,
                        summary_op=summary_op,
                        env=env)
def train():
    """Train the language model against a fixed epsilon-greedy question sampler.

    Builds the 'VAQ-VarRL' sampler and the language model (selected by
    ``FLAGS.model_type``) in a single graph, optimizes only the language
    model's loss, and checkpoints only variables in the 'LM' collection
    scope. Reads ``FLAGS`` for the training directory, dataset version,
    logging cadence, and step budget.
    """
    model_config = ModelConfig()
    training_config = TrainConfig()

    # Reader factory; the actual reader is instantiated after graph build.
    reader_fn = create_reader('VAQ-Var', phase='train')

    # Create the training directory if it does not exist yet.
    train_dir = FLAGS.train_dir % (FLAGS.version, FLAGS.model_type)
    if not tf.gfile.IsDirectory(train_dir):
        tf.logging.info("Creating training directory: %s", train_dir)
        tf.gfile.MakeDirs(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Build the question sampler. NOTE(review): 'set_epsion' is the
        # sampler API's own (misspelled) method name; presumably it sets the
        # epsilon used for sampling — confirm against the model class.
        sample_fn = get_model_creation_fn('VAQ-VarRL')
        sampler = sample_fn(model_config, 'train')
        sampler.set_epsion(0.98)
        sampler.build()

        # Build the language model whose loss is optimized below.
        lm_fn = get_model_creation_fn(FLAGS.model_type)
        language_model = lm_fn()
        language_model.build()

        # Set up the learning rate.
        learning_rate = tf.constant(training_config.initial_learning_rate)

        def _learning_rate_decay_fn(learn_rate, global_step):
            # Smooth (non-staircase) exponential decay driven by the
            # sampler's global step.
            return tf.train.exponential_decay(
                learn_rate,
                global_step,
                decay_steps=training_config.decay_step,
                decay_rate=training_config.decay_factor,
                staircase=False)

        # Only the language model's loss is optimized; the sampler merely
        # supplies the global step counter.
        train_op = tf.contrib.layers.optimize_loss(
            loss=language_model.loss,
            global_step=sampler.global_step,
            learning_rate=learning_rate,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=_learning_rate_decay_fn)

        # Checkpoint only the language-model ('LM') variables.
        # GLOBAL_VARIABLES is the non-deprecated alias of GraphKeys.VARIABLES
        # (both resolve to the same 'variables' collection key in TF 1.x).
        var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'LM')
        saver = tf.train.Saver(
            var_list=var_list,
            max_to_keep=training_config.max_checkpoints_to_keep)

        # Merge all summaries for TensorBoard.
        summary_op = tf.summary.merge_all()

    # Create the data reader (outside the graph; it feeds batches at runtime).
    reader = reader_fn(
        batch_size=16,
        subset='kprestval',  # 'kptrain'
        version=FLAGS.version)

    # Run training.
    training_util.train(train_op,
                        train_dir,
                        log_every_n_steps=FLAGS.log_every_n_steps,
                        graph=g,
                        global_step=sampler.global_step,
                        number_of_steps=FLAGS.number_of_steps,
                        init_fn=sampler.init_fn,
                        saver=saver,
                        reader=reader,
                        model=language_model,
                        summary_op=summary_op,
                        sampler=sampler)