Example #1
def validate(val_loader, model, criterion, epoch):
    results = predict(val_loader, model)
    vqa_eval = get_eval(results, cfg.TEST.SPLITS[0])

    # save results and accuracy; use context managers so the files are closed
    result_file = os.path.join(cfg.LOG_DIR, 'result-{:03}.json'.format(epoch))
    with open(result_file, 'w') as f:
        json.dump(results, f)
    acc_file = os.path.join(cfg.LOG_DIR, 'accuracy-{:03}.json'.format(epoch))
    with open(acc_file, 'w') as f:
        json.dump(vqa_eval.accuracy, f)

    return vqa_eval.accuracy['overall']
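
Example #1 relies on repo-level helpers (`predict`, `get_eval`) and the global `cfg` config, none of which are shown on this page. A minimal sketch of how such a `validate` function might be driven from a training loop; `train_one_epoch`, `train_loader`, `optimizer`, and `NUM_EPOCHS` are hypothetical stand-ins, not names from the source:

import os
import torch

# Hypothetical driver loop around validate() above; train_one_epoch,
# train_loader, val_loader, model, criterion, optimizer, and NUM_EPOCHS
# are illustrative names, not part of the snippets on this page.
best_acc = 0.0
for epoch in range(NUM_EPOCHS):
    train_one_epoch(train_loader, model, criterion, optimizer, epoch)
    acc = validate(val_loader, model, criterion, epoch)
    if acc > best_acc:  # keep the checkpoint with the best overall accuracy
        best_acc = acc
        torch.save({'state_dict': model.state_dict()},
                   os.path.join(cfg.LOG_DIR, 'best.pth'))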
Example #2
def validate(val_loader, model, criterion, epoch, quesIds=None):
    results = predict(val_loader, model)
    if quesIds is None:
        vqa_eval = get_eval(results, cfg.TEST.SPLITS[0])
    else:
        vqa_eval = get_eval_subset(results, cfg.TEST.SPLITS[0], quesIds)

    # save result and accuracy
    #result_file = os.path.join(cfg.LOG_DIR, 'result-{:03}.json'.format(epoch))
    #json.dump(results, open(result_file, 'w'))
    #acc_file = os.path.join(cfg.LOG_DIR, 'accuracy-{:03}.json'.format(epoch))
    #json.dump(vqa_eval.accuracy, open(acc_file, 'w'))
    print(vqa_eval.accuracy['overall'])
    print(vqa_eval.accuracy['perAnswerType'])
    return vqa_eval.accuracy['overall']
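
The `quesIds` branch in Example #2 scores only a subset of questions. `get_eval_subset` is repo-specific and not shown; a plausible sketch of the filtering step it implies, with `filter_results` as a hypothetical helper:

def filter_results(results, ques_ids):
    # Hypothetical helper: keep only answers whose question_id is in
    # ques_ids, mirroring what get_eval_subset presumably does before
    # scoring. `results` is a list of {'question_id': ..., 'answer': ...}.
    wanted = set(ques_ids)
    return [r for r in results if r['question_id'] in wanted]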
Example #3
def validate(val_loader, model, criterion, epoch):
    # results: dicts {'question_id': que_id, 'answer': model's answer},
    # gathered batch by batch (num_batches x batch_size entries in total)
    results = predict(val_loader, model)
    vqa_eval = get_eval(results, 'val2014')  # evaluate on the val2014 split

    return vqa_eval.accuracy['overall']  # overall accuracy over all batches and samples
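
All three `validate` variants return `vqa_eval.accuracy['overall']`, and Example #2 also prints `perAnswerType`. In the standard VQA evaluation toolkit the accuracy dict is keyed roughly as below (values here are illustrative, not from the source):

accuracy = {
    'overall': 58.2,  # mean accuracy over all questions
    'perAnswerType': {'yes/no': 79.3, 'number': 36.5, 'other': 46.8},
    'perQuestionType': {'is the': 77.4, 'what color is the': 52.1},  # etc.
}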
Example #4
    def __init__(self, model_info, split, save_dir):
        assert len(model_info) > 0
        assert len(cfg.TEST.SPLITS) == 1 and cfg.TEST.SPLITS[0] == split

        model_info = sorted(model_info, key=itemgetter(0))

        self._split = split
        self.model_info = model_info
        self.save_dir = save_dir

        # load model
        self._pred_ans = []
        self._scores = []
        self._att_weights = []
        dataset = VQADataset('test', model_info[0][0])
        emb_size = get_emb_size()
        for model_group_name, model_name, cp_file in model_info:
            cache_file = cp_file + '.cache'
            if os.path.isfile(cache_file):
                print("load from cache: '{}".format(cache_file))
                cache = pickle.load(open(cache_file, 'rb'))
                self._pred_ans.append(cache['pred_ans'])
                self._scores.append(cache['scores'])
                self._att_weights.append(cache['att_weights'])
                continue

            # dataset
            dataset.reload_obj(model_group_name)
            dataloader = torch.utils.data.DataLoader(dataset,
                                                     batch_size=args.bs,
                                                     shuffle=False,
                                                     num_workers=2,
                                                     pin_memory=True)
            # model
            model_group = import_module('models.' + model_group_name)
            model = getattr(model_group,
                            model_name)(num_words=dataset.num_words,
                                        num_ans=dataset.num_ans,
                                        emb_size=emb_size)
            checkpoint = torch.load(cp_file,
                                    map_location=lambda s, l: s.cuda(0))
            model.load_state_dict(checkpoint['state_dict'])
            model.cuda()
            model.eval()

            # predicting
            itoa = dataloader.dataset.codebook['itoa']
            batch_att_weight = []
            pred_ans = []
            bar = progressbar.ProgressBar()
            print('predicting answers...')
            # sample: (que_id, img, que, [obj])
            for sample in bar(dataloader):
                # setting hook
                att_weight_buff = torch.FloatTensor(len(sample[0]), 36)

                def get_weight(module, inputs, output):
                    # forward hook: stash a copy of att_net's output
                    att_weight_buff.copy_(output.data.view_as(att_weight_buff))

                hook = model.att_net.register_forward_hook(get_weight)

                # forward
                sample_var = [Variable(d).cuda() for d in list(sample)[1:]]
                score = model(*sample_var)
                # softmax over the 36 region weights (dim=1 made explicit)
                att_weight = F.softmax(Variable(att_weight_buff),
                                       dim=1).data.numpy()
                batch_att_weight.append(att_weight)
                pred_ans.extend(format_result(sample[0], score, itoa))

                hook.remove()
            att_weights = np.vstack(batch_att_weight)

            # evaluation
            print('evaluating results...')
            if split in ('train2014', 'val2014'):
                vqa_eval = get_eval(pred_ans, split)
                scores = []
                for i in range(len(dataset)):
                    qid = int(dataset[i][0])
                    score = vqa_eval.evalQA.get(qid)
                    scores.append(score)
            else:
                scores = None

            self._pred_ans.append(pred_ans)
            self._scores.append(scores)
            self._att_weights.append(att_weights)

            # save cache so subsequent runs can skip prediction
            cache = {
                'pred_ans': pred_ans,
                'scores': scores,
                'att_weights': att_weights,
            }
            with open(cache_file, 'wb') as f:
                pickle.dump(cache, f)

        print('done.')

        # load data
        print('load raw data...')
        split_fname = '{}/raw-{}.json'.format(cfg.DATA_DIR, split)
        with open(split_fname) as f:
            self._data = json.load(f)
        print('load boxes...')
        self._boxes = self._load_box()

        # query key
        self._question = None
        self._answer = None
        self._condition = None

        # query result
        self._r_question = None
        self._r_answer = None
        self._r_condition = None

        # dirty flag
        self._d_question = True
        self._d_answer = True
        self._d_condition = True

        self.last_results = None
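
The central trick in Example #4 is the forward hook on `model.att_net`, which copies the attention weights into a preallocated buffer on every forward pass. A self-contained sketch of the same pattern on a toy module (not the repo's model):

import torch
import torch.nn as nn

# Toy stand-in for model.att_net: any submodule whose output we want to capture.
net = nn.Linear(8, 36)
buff = torch.empty(4, 36)  # (batch_size, num_regions), as in the snippet

def get_weight(module, inputs, output):
    # Called after every forward pass of `net`; stash a copy of its output.
    buff.copy_(output.detach().view_as(buff))

hook = net.register_forward_hook(get_weight)
_ = net(torch.randn(4, 8))   # this forward pass fills `buff` via the hook
hook.remove()                # remove the hook once you are done capturing
print(buff.shape)            # torch.Size([4, 36])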