Code Example #1
File: dataset.py  Project: sameerdharur/sorting-vqa
    def load_item(self, idx):
        sample_info = self.imdb[idx]
        current_sample = Sample()
        current_sample.dataset_name = self.dataset

        # Plain VQA training split: only the main question exists, so the
        # sub/other question fields below all fall back to it.
        if self.dataset == 'train_vqa':

            text_processor_argument = {
                "tokens": sample_info["question_tokens"]
            }
            processed_question = self.text_processor(text_processor_argument)
            current_sample.text_len = torch.tensor(
                len(sample_info["question_tokens"]), dtype=torch.int)
            current_sample.text = processed_question["text"]
            current_sample.question_text = sample_info["question_str"]
            current_sample.text_sq = current_sample.text
            current_sample.text_oq = current_sample.text
            current_sample.reasoning_question = sample_info["question_str"]
            current_sample.reasoning_answer = sample_info["answers"][0]
            current_sample.sub_question = sample_info["question_str"]
            current_sample.other_question = sample_info["question_str"]

        # Introspection training/test splits: a main question plus optional
        # sub- and other-questions.
        elif self.dataset in ('train_introspect', 'test'):

            text_processor_argument = {
                "text": sample_info["main_question_str"]
            }
            processed_question = self.text_processor(text_processor_argument)
            current_sample.text = processed_question["text"]
            if "sub_question_str" in sample_info:
                text_processor_argument_sq = {
                    "text": sample_info["sub_question_str"]
                }
                processed_question_sq = self.text_processor(
                    text_processor_argument_sq)
                current_sample.text_sq = processed_question_sq["text"]

            if "other_question_str" in sample_info:
                text_processor_argument_oq = {
                    "text": sample_info["other_question_str"]
                }
                processed_question_oq = self.text_processor(
                    text_processor_argument_oq)
                current_sample.text_oq = processed_question_oq["text"]

            current_sample.question_text = sample_info["main_question_str"]
            current_sample.reasoning_question = sample_info[
                "main_question_str"]
            current_sample.reasoning_answer = sample_info["main_answer_str"][0]
            current_sample.sub_question = sample_info.get(
                "sub_question_str", sample_info["main_question_str"])
            current_sample.other_question = sample_info.get(
                "other_question_str", sample_info["main_question_str"])
            current_sample.text_len = torch.tensor(
                len(sample_info["main_question_tokens"]), dtype=torch.int)

        # Remaining splits (e.g. validation): keyed on "question_str", with
        # the same optional sub/other question handling.
        else:

            text_processor_argument = {"text": sample_info["question_str"]}
            processed_question = self.text_processor(text_processor_argument)
            current_sample.text = processed_question["text"]
            if "sub_question_str" in sample_info:
                text_processor_argument_sq = {
                    "text": sample_info["sub_question_str"]
                }
                processed_question_sq = self.text_processor(
                    text_processor_argument_sq)
                current_sample.text_sq = processed_question_sq["text"]

            if "other_question_str" in sample_info:
                text_processor_argument_oq = {
                    "text": sample_info["other_question_str"]
                }
                processed_question_oq = self.text_processor(
                    text_processor_argument_oq)
                current_sample.text_oq = processed_question_oq["text"]
            else:
                current_sample.text_oq = current_sample.text_sq

            current_sample.question_text = sample_info["question_str"]
            current_sample.reasoning_question = sample_info["question_str"]
            current_sample.reasoning_answer = sample_info["answers"][0]
            current_sample.sub_question = sample_info.get(
                "sub_question_str", sample_info["question_str"])
            # This branch reuses the sub-question as the other question.
            current_sample.other_question = current_sample.sub_question
            current_sample.text_len = torch.tensor(
                len(sample_info["question_tokens"]), dtype=torch.int)

        current_sample.question_id = torch.tensor(sample_info["question_id"],
                                                  dtype=torch.int)

        if isinstance(sample_info["image_id"], int):
            current_sample.image_id = torch.tensor(sample_info["image_id"],
                                                   dtype=torch.int)
        else:
            current_sample.image_id = sample_info["image_id"]

        if self._use_features is True:
            features = self.features_db[idx]
            current_sample.update(features)

        # Add OCR details (bounding boxes, vectors, tokens) here.
        current_sample = self.add_ocr_details(sample_info, current_sample)
        # Depending on whether soft copy is used, this can add a dynamic
        # answer space.
        current_sample = self.add_answer_info(sample_info, current_sample)

        return current_sample
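
Both examples rely on the same guard-plus-fallback convention for the optional sub-/other-question fields. Below is a minimal, self-contained sketch of that convention; the Sample stand-in and fake_text_processor stub are illustrative assumptions, not the project's real classes.

import torch

class Sample(dict):
    """Toy stand-in: a dict with attribute access, like Pythia's Sample."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__

def fake_text_processor(argument):
    # Stand-in for self.text_processor: maps words to toy token ids.
    text = argument.get("text") or " ".join(argument["tokens"])
    return {"text": torch.tensor([len(w) for w in text.split()])}

sample_info = {"main_question_str": "what color is the car"}
current_sample = Sample()
processed = fake_text_processor({"text": sample_info["main_question_str"]})
current_sample.text = processed["text"]

# Guard-plus-fallback, as in the load_item variants above and below:
if "sub_question_str" in sample_info:
    processed_sq = fake_text_processor(
        {"text": sample_info["sub_question_str"]})
    current_sample.text_sq = processed_sq["text"]
else:
    current_sample.text_sq = current_sample.text  # fall back to main question

print(current_sample.text_sq)
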
Code Example #2
    def load_item(self, idx):
        sample_info = self.imdb[idx]
        current_sample = Sample()

        if "question_tokens" in sample_info:
            text_processor_argument = {
                "tokens": sample_info["question_tokens"]
            }
        else:
            #text_processor_argument = {"text": sample_info["question"]}
            text_processor_argument = {
                "text": sample_info["main_question_str"]
            }
            if "sub_question_str" in sample_info:
                text_processor_argument_sq = {
                    "text": sample_info["sub_question_str"]
                }
            if "other_question_str" in sample_info:
                text_processor_argument_oq = {
                    "text": sample_info["other_question_str"]
                }

        processed_question = self.text_processor(text_processor_argument)
        processed_question_sq = self.text_processor(text_processor_argument_sq)
        processed_question_oq = self.text_processor(text_processor_argument_oq)

        current_sample.text = processed_question["text"]
        current_sample.text_sq = processed_question_sq["text"]
        current_sample.text_oq = processed_question_oq["text"]
        current_sample.question_text = sample_info["main_question_str"]
        current_sample.reasoning_question = sample_info["main_question_str"]
        current_sample.reasoning_answer = sample_info["main_answer_str"][0]
        current_sample.image_url = sample_info["image_path"]

        current_sample.sub_question = sample_info.get(
            "sub_question_str", sample_info["main_question_str"])
        current_sample.other_question = sample_info.get(
            "other_question_str", sample_info["main_question_str"])

        current_sample.question_id = torch.tensor(sample_info["question_id"],
                                                  dtype=torch.int)

        if isinstance(sample_info["image_id"], int):
            current_sample.image_id = torch.tensor(sample_info["image_id"],
                                                   dtype=torch.int)
        else:
            current_sample.image_id = sample_info["image_id"]

        current_sample.text_len = torch.tensor(
            len(sample_info["main_question_tokens"]), dtype=torch.int)

        if self._use_features is True:
            features = self.features_db[idx]
            current_sample.update(features)

        # Add OCR details (bounding boxes, vectors, tokens) here.
        current_sample = self.add_ocr_details(sample_info, current_sample)
        # Depending on whether soft copy is used, this can add a dynamic
        # answer space.
        current_sample = self.add_answer_info(sample_info, current_sample)
        #print("current sample : {}".format(current_sample))
        #pdb.set_trace()
        #print("Current sample : {}".format(current_sample))

        return current_sample
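
For context, here is a hedged sketch of how a load_item-style dataset is typically driven end to end. ToyDataset below is illustrative only; the real class also carries the imdb annotation database, a text_processor, and a features_db, none of which are reproduced here.

import torch
from torch.utils.data import DataLoader, Dataset


class ToyDataset(Dataset):
    def __init__(self):
        # Stand-in for the annotation database (self.imdb).
        self.imdb = [{"question_id": i, "question_str": f"question {i}"}
                     for i in range(8)]

    def load_item(self, idx):
        info = self.imdb[idx]
        # The real method returns a Sample; a plain dict keeps this sketch
        # compatible with the default collate function.
        return {
            "question_id": torch.tensor(info["question_id"], dtype=torch.int),
            "text": info["question_str"],
        }

    def __len__(self):
        return len(self.imdb)

    def __getitem__(self, idx):
        # Frameworks like Pythia route __getitem__ through load_item.
        return self.load_item(idx)


loader = DataLoader(ToyDataset(), batch_size=4)
for batch in loader:
    print(batch["question_id"])  # e.g. tensor([0, 1, 2, 3], dtype=torch.int32)
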