def main(_):
    """Decode with a fixed checkpoint via beam search and report oracle scores.

    NOTE(review): the checkpoint path is hard-coded; FLAGS.ckpt_file is
    intentionally not consulted here — confirm this is the desired behavior.
    """
    from eval_vqa_question_oracle import evaluate_oracle

    checkpoint = 'model/v1_var_rl_att2_restval_VAQ-VarRL2_r2/model.ckpt-150000'
    result_file, vqa_score = ivqa_decoding_beam_search(checkpoint)
    evaluate_oracle(result_file)
    print('BS mean VQA score: %0.3f' % vqa_score)
Example #2
0
def main():
    """Merge two result sets, then report oracle evaluation and mean scores."""
    from eval_vqa_question_oracle import evaluate_oracle

    attention_results = load_results('vae_ia_rl_attention2_run0')
    rl_results = load_results('vae_rl1')
    merged_file, vqa_score, unk_count = merge_result(attention_results,
                                                     rl_results)
    evaluate_oracle(merged_file)
    print('BS mean VQA score: %0.3f' % vqa_score)
    print('BS mean #questions: %0.3f' % unk_count)
 def test_model(model_path):
     """Decode with the given checkpoint and return its evaluation score.

     NOTE(review): relies on `subset`, `target_split` and `FLAGS` from the
     enclosing scope — confirm against the caller.
     """
     with tf.Graph().as_default():
         result_file = ivqa_decoding_beam_search(checkpoint_path=model_path,
                                                 subset=subset)
         # Pick the scorer by mode: oracle evaluation for 'full', standard
         # question evaluation otherwise.
         if FLAGS.mode != 'full':
             score = evaluate_question_standard(result_file)
         else:
             score = evaluate_oracle(result_file, split=target_split)
     return float(score)
Example #4
0
def compute_merged_result(method):
    """Merge the 'reference' results with `method`'s results and evaluate.

    Args:
        method: name of the result set to merge with the reference set, as
            understood by `load_results`.

    Returns:
        Tuple of (scores[1], mean_vqa_score): the second entry of the oracle
        evaluation scores and the mean VQA score of the merged results.
    """
    from eval_vqa_question_oracle import evaluate_oracle

    res1 = load_results('reference')
    res2 = load_results(method)
    # The unknown-question count from merge_result is not used here.
    res_file, mean_vqa_score, _ = merge_result(res1, res2)
    scores = evaluate_oracle(res_file)
    # scores[1] is reported as the oracle CIDEr elsewhere in this file
    # (labelled 'O-C' in _procss_worker) — verify against evaluate_oracle.
    return scores[1], mean_vqa_score
 def _procss_worker(ckpt_file, cur_iter):
     """Sample from a checkpoint, evaluate it, and format a score summary.

     NOTE(review): uses `backup_exps`, `model_backup_dir`, `_backup_model`
     and `FLAGS` from the enclosing scope — confirm against the caller.
     """
     max_iters = 500
     if cur_iter not in backup_exps:
         run_id = 100
     else:
         # This iteration is marked for archiving: copy the checkpoint aside
         # before sampling and use the experiment-specific run id.
         print('Copying model')
         backup_target = os.path.join(model_backup_dir,
                                      os.path.basename(ckpt_file))
         _backup_model(ckpt_file, backup_target)
         print('Done')
         run_id = backup_exps[cur_iter]
     # Run beam-search sampling, then score the generated questions.
     res_file, ms, mc = ivqa_decoding_beam_search(ckpt_file, run_id,
                                                  max_iters)
     oracle_scores = evaluate_oracle(res_file)
     mer_cider, _ = compute_merged_result('%s_run%d' %
                                          (FLAGS.method, run_id))
     return 'O-C: %0.3f, O-B4: %0.3f, m-S: %0.3f, m-C: %0.2f, mer-O-C: %0.3f' % (
         oracle_scores[1], oracle_scores[0], ms, mc, mer_cider)
Example #6
0
            }
            new_results.append(res_i)

    scores += scores_i
    nums.append(len(scores_i))
    scored_qs.append({
        'question_id': qid,
        'question_inds': qs,
        'lm_scores': scores_i
    })

# Save the max-probability selections and score them with the standard metric.
maxprob_res_file = res_file = '/data1/fl302/projects/inverse_vqa/result/var_vaq_rand_IVQA-BASIC_lmflt_maxprob.json'
save_json(maxprob_res_file, maxprob_results)
evaluate_question_standard(maxprob_res_file)

# Persist the per-question language-model scores collected above.
sv_file = '/data1/fl302/projects/inverse_vqa/result/var_vaq_rand_IVQA-BASIC_lmscores.json'
save_json(sv_file, scored_qs)

# Summary statistics over the filtered questions and their LM scores.
print('Average #questions: %0.2f' % np.mean(nums))
print('Average LM scores: %0.2f' % np.mean(scores))

from eval_vqa_question_oracle import evaluate_oracle

# Save the LM-filtered results and run the oracle evaluation on the val split.
new_res_file = '/data1/fl302/projects/inverse_vqa/result/var_vaq_rand_IVQA-BASIC_lmflt.json'
save_json(new_res_file, new_results)
cider = evaluate_oracle(new_res_file, split='val')

import pdb

# NOTE(review): leftover interactive breakpoint — remove before unattended runs.
pdb.set_trace()
Example #7
0
def main(_):
    """Run beam-search decoding without an explicit checkpoint and report scores.

    NOTE(review): None is passed as the checkpoint, so checkpoint selection is
    left to ivqa_decoding_beam_search; FLAGS.ckpt_file is not consulted here.
    """
    from eval_vqa_question_oracle import evaluate_oracle

    result_file, vqa_score = ivqa_decoding_beam_search(None)
    evaluate_oracle(result_file)
    print('BS mean VQA score: %0.3f' % vqa_score)
示例#8
0
 def test_model(model_path):
     """Decode `model_path` inside a fresh graph and return its oracle score.

     NOTE(review): `subset` comes from the enclosing scope — confirm against
     the caller.
     """
     graph = tf.Graph()
     with graph.as_default():
         result_file = ivqa_decoding_beam_search(
             checkpoint_path=model_path, subset=subset)
         score = evaluate_oracle(result_file)
     return score