Example #1
0
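# This snippet assumes the surrounding repo provides load_json / save_json,
# AttentionModel, and AnswerTokenToTopAnswer.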
import os

def process():
    def _parse_image_id(image):
        return int(image.split('.')[0].split('_')[-1])

    model = AttentionModel()
    ans2top_ans = AnswerTokenToTopAnswer()

    # NOTE: machine-specific absolute paths; adjust for your environment.
    task_data_dir = '/usr/data/fl302/code/utils/bs_data_maker'
    task_data_file = os.path.join(task_data_dir, 'task_data_for_verif.json')
    task_data = load_json(task_data_file)
    is_valid = []
    num = len(task_data)
    for i, info in enumerate(task_data):
        print('%d/%d' % (i, num))
        image = info['image']
        image_id = _parse_image_id(image)
        question = info['target']
        answer = info['answer']
        scores = model.inference(image_id, question)
        scores[:, -1] = -10.  # push down the last answer slot (presumably the OOV entry) so it is never predicted
        top_ans_id = ans2top_ans.direct_query(answer)
        if top_ans_id == 2000:  # 2000 marks an out-of-vocabulary answer
            raise ValueError('answer "%s" is out of vocabulary' % answer)
        scores = scores.flatten()
        pred_top_ans_id = scores.argmax()
        is_valid.append(int(pred_top_ans_id == top_ans_id))

    n_valid = sum(is_valid)
    print('valid: %d/%d' % (n_valid, num))
    save_json(os.path.join(task_data_dir, 'task_data_verif_state.json'),
              is_valid)
Example #2
 def __init__(self, ckpt_file='model/v1_vqa_VQA/v1_vqa_VQA_best2/model.ckpt-135000',
              use_dis_reward=False):
     self.g = tf.Graph()
     self.ckpt_file = ckpt_file
     from models.vqa_soft_attention import AttentionModel
     from vqa_config import ModelConfig
     config = ModelConfig()
     self.ans2id = AnswerTokenToTopAnswer()
     self.use_dis_reward = use_dis_reward
     with self.g.as_default():
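         # Build the VQA attention model inside this wrapper's private graph and
         # restore the pretrained weights from the checkpoint.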
         self.sess = tf.Session()
         self.model = AttentionModel(config, phase='test_broadcast')
         self.model.build()
         vars = tf.trainable_variables()
         self.saver = tf.train.Saver(var_list=vars)
         self.saver.restore(self.sess, ckpt_file)
Example #3
0
 def __init__(self, ckpt_file='/usr/data/fl302/code/inverse_vqa/model/mlb_attention_v2/model.ckpt-170000',
              use_dis_reward=False):
     self.g = tf.Graph()
     self.ckpt_file = ckpt_file
     self.v1tov2 = TopAnswerVersionConverter()
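     # Presumably converts top-answer ids between the VQA v1 and v2 vocabularies.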
     from models.vqa_soft_attention_v2 import AttentionModel
     from vqa_config import ModelConfig
     config = ModelConfig()
     self.ans2id = AnswerTokenToTopAnswer()
     self.use_dis_reward = use_dis_reward
     with self.g.as_default():
         self.sess = tf.Session()
         self.model = AttentionModel(config, phase='test_broadcast')
         self.model.build()
         vars = tf.trainable_variables()
         self.saver = tf.train.Saver(var_list=vars)
         self.saver.restore(self.sess, ckpt_file)
Example #4
0
 def __init__(self, ckpt_file='', use_dis_reward=False,
              use_attention_model=False):
     self.g = tf.Graph()
     self.ckpt_file = ckpt_file
     self.use_attention_model = use_attention_model
     from models.vqa_base import BaseModel
     from vqa_config import ModelConfig
     config = ModelConfig()
     self.ans2id = AnswerTokenToTopAnswer()
     self.use_dis_reward = use_dis_reward
     with self.g.as_default():
         self.sess = tf.Session()
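         # Build either the attention model or the plain base model; AttentionModel
         # is assumed to be imported at module level (only BaseModel is imported here).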
         if self.use_attention_model:
             self.model = AttentionModel(config, phase='test')
             self.model.build()
         else:
             self.model = BaseModel(config, phase='test')
             self.model.build()
         vars = tf.trainable_variables()
         self.saver = tf.train.Saver(var_list=vars)
         self.saver.restore(self.sess, ckpt_file)
 def __init__(self, use_dis_reward=False):
     self.g = tf.Graph()
     self.ans2id = AnswerTokenToTopAnswer()
     self.use_dis_reward = use_dis_reward
     self.model = None
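     # No graph or session is created here; the model is presumably constructed
     # lazily elsewhere.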