Example no. 1
    def evaluation(self, batch_data, model_outputs, *args, **kwargs):
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batch_data = list2dict(batch_data)

        # Return the per-batch score and batch size so the evaluator can aggregate them.
        return [{'batch_score': model_outputs['batch_score']}], [{'batch_size': model_outputs['batch_size']}]
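All of the evaluation snippets on this page import list2dict from imix.models.vqa_models.mcan_mix to collate the batch when the trainer runs in by-iteration mode. Its implementation is not shown here; as a rough mental model only, a collate helper of this kind might look like the sketch below (the name list2dict_sketch and the stack-or-list behaviour are assumptions, not the actual imix code).

import torch


def list2dict_sketch(batch_list):
    """Hypothetical collate helper: merge a list of per-sample dicts into one
    dict of batched values (tensors get stacked, everything else stays a list)."""
    merged = {}
    for key in batch_list[0]:
        values = [sample[key] for sample in batch_list]
        if torch.is_tensor(values[0]):
            merged[key] = torch.stack(values)   # e.g. per-sample features -> (B, ...)
        else:
            merged[key] = values                # e.g. string question ids stay a list
    return merged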
Example no. 2

    def evaluation(self, batch_data, model_outputs, *args, **kwargs):
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batch_data = list2dict(batch_data)

        eval_result = self.evaluate(batch_data, model_outputs)

        return eval_result
Example no. 3
    def evaluation(self, batch_data, model_outputs, *args, **kwargs):
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batch_data = list2dict(batch_data)

        labels = list(batch_data['answers_scores'].split(1))
        q_ids, scores = batch_data['question_id'].split(1), model_outputs['scores'].to('cpu').split(1)
        predictions = list({'question_id': q_id, 'scores': score} for q_id, score in zip(q_ids, scores))
        # predictions, labels = self.data_pre_process(predictions, labels, *args,
        #                                             **kwargs)
        predictions = self.data_pre_process(predictions, *args, **kwargs)
        return predictions, labels
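The split(1) calls in this example slice the batched question ids and the score tensor into per-question pieces before wrapping them in prediction dicts. A standalone illustration with made-up sizes (data_pre_process is left out, since its behaviour is specific to the evaluator):

import torch

question_id = torch.tensor([11, 12, 13])
scores = torch.randn(3, 5)                    # 3 questions, 5 answer candidates

q_ids = question_id.split(1)                  # three tensors of shape (1,)
per_q_scores = scores.to('cpu').split(1)      # three tensors of shape (1, 5)

predictions = [{'question_id': q_id, 'scores': score}
               for q_id, score in zip(q_ids, per_q_scores)]
print(predictions[0]['question_id'])          # tensor([11])
print(predictions[0]['scores'].shape)         # torch.Size([1, 5])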
Example no. 4
    def evaluation(self, batch_data, model_outputs, *args, **kwargs):
        import torch
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batch_data = list2dict(batch_data)

        predictions = []
        ques_id = batch_data['ques_id']
        # Best score and predicted answer index for each question in the batch.
        score, label = model_outputs['scores'].max(1)
        for qid, l in zip(ques_id, label.cpu().numpy()):
            if torch.is_tensor(qid):
                predictions.append({qid.item(): l})
            else:
                predictions.append({qid: l})

        return predictions, score
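Example no. 4 takes the argmax over the answer dimension and keys each prediction by its question id, unwrapping tensor ids with .item(). A small self-contained demonstration with dummy logits (the values and ids are illustrative only):

import torch

scores = torch.tensor([[0.1, 0.7, 0.2, 0.0, 0.0],
                       [0.3, 0.1, 0.5, 0.05, 0.05],
                       [0.9, 0.02, 0.03, 0.03, 0.02]])
ques_id = [torch.tensor(101), 102, torch.tensor(103)]   # mixed tensor / plain-int ids

score, label = scores.max(1)            # best value and argmax index per question
predictions = []
for qid, l in zip(ques_id, label.cpu().numpy()):
    key = qid.item() if torch.is_tensor(qid) else qid
    predictions.append({key: l})        # predicted answer index keyed by question id

print(predictions)
print(score)                            # tensor([0.7000, 0.5000, 0.9000])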
Example no. 5
    def preprocess_data(self, batched_inputs: Dict) -> Dict:
        """Pad the input features and move the batch to CUDA.

        Args:
            batched_inputs: a Dict containing the input data

        Returns:
            Dict: the same dict with padded, CUDA-resident tensors
        """
        import torch
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batched_inputs = list2dict(batched_inputs)

        img_feat = batched_inputs['features']
        input_ids = batched_inputs['input_ids']
        input_mask = batched_inputs['input_mask']

        # Flatten the spatial grid and zero-pad the flattened axis to a fixed 1024 positions.
        b, c, h, w = img_feat.shape
        feat = img_feat.view(b, c, -1)
        padded_feat = torch.zeros((b, c, 1024), dtype=torch.float)
        padded_feat[:, :, :h * w] = feat
        feat = padded_feat.unsqueeze(-1)
        # feat = feat.squeeze(0)
        feat = feat.cuda()

        input_ids = input_ids.cuda()
        input_mask = input_mask.cuda()

        batched_inputs['feature'] = feat
        batched_inputs['input_ids'] = input_ids
        # Invert the mask: assuming 1 marks real tokens, True now marks padded positions.
        batched_inputs['input_mask'] = ~input_mask

        if self.training:
            answers_scores = batched_inputs['answers_scores']
            answers_scores = answers_scores.cuda()
            batched_inputs['answers_scores'] = answers_scores

        return batched_inputs
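The reshaping in Example no. 5 is easiest to follow with concrete shapes. A minimal sketch on dummy CPU tensors (the 7x7 grid and 2048 channels are assumptions for illustration; the snippet itself works with whatever shape 'features' actually has):

import torch

b, c, h, w = 2, 2048, 7, 7                  # dummy grid features, assumed sizes
img_feat = torch.randn(b, c, h, w)

feat = img_feat.view(b, c, -1)              # (B, C, H*W) = (2, 2048, 49)
padded_feat = torch.zeros((b, c, 1024))     # zero-pad the flattened spatial axis to 1024
padded_feat[:, :, :h * w] = feat
feat = padded_feat.unsqueeze(-1)            # (B, C, 1024, 1)
print(feat.shape)                           # torch.Size([2, 2048, 1024, 1])

input_mask = torch.tensor([[1, 1, 1, 0]], dtype=torch.bool)
print(~input_mask)                          # inverted mask: True now marks padding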
Example no. 6
    def submit(self, batch_data, model_outputs, *args, **kwargs):
        # Earlier variant kept for reference:
        # scores, labels = model_outputs['scores'].max(1)
        # q_ids = batch_data['question_id'].detach().numpy()
        # labels = labels.cpu().detach().numpy()
        # q2a = batch_data['quesid2ans']
        # predictions = list({
        #     'question_id': int(qid),
        #     'answer': q2a[l][0]
        # } for qid, l in zip(q_ids, labels))
        from imix.models.vqa_models.mcan_mix import list2dict
        from imix.engine.organizer import is_by_iter
        if is_by_iter():
            batch_data = list2dict(batch_data)

        q_ids, scores = batch_data['question_id'].split(1), model_outputs['scores'].to('cpu').split(1)
        predictions = list({'question_id': q_id, 'scores': score} for q_id, score in zip(q_ids, scores))
        predictions = self.data_pre_process(predictions, *args, **kwargs)
        # q_ids should be the question id strings; quesid2ans maps each prediction back to its answer string.
        q2a = batch_data['quesid2ans']
        predictions = list({
            'questionid': str(qid),
            'prediction': str(m[0][l])
        } for qid, l, m in zip(q_ids, predictions, q2a))
        return predictions