Example #1
    def merge_features(self, name, num_chunks=16):
        logger.info(f"Merging dataset...")
        # Merged dataset shapes: (num_images, ...) per feature type
        spec = {
            "spatial": {"features": (148855, 2048, 7, 7)},
            "objects": {"features": (148855, 100, 2048), "bboxes": (148855, 100, 4)}}

        lengths = [0]  # running offsets: lengths[i] = global start row of chunk i
        with h5py.File(os.path.join(self.get_processed_data_dir(), f"{name}.h5"), "w") as out:
            datasets = {}
            for dname in spec[name]:
                datasets[dname] = out.create_dataset(dname, spec[name][dname])

            low = 0
            for i in tqdm(range(num_chunks), desc="Merge dataset"):
                with h5py.File(os.path.join(self.get_raw_data_dir(), name, f"gqa_{name}_{i}.h5"), "r") as chunk:
                    high = low + chunk["features"].shape[0]
                    for dname in spec[name]:
                        datasets[dname][low:high] = chunk[dname][:]
                    low = high
                    lengths.append(high)  # start row of the next chunk

        logger.info(f"Saving {name} info...")
        with open(os.path.join(self.get_raw_data_dir(), name, f"gqa_{name}_info.json")) as infoIn:
            info = json.load(infoIn)
            for imageId in info:
                # global row = start offset of the image's chunk + within-chunk index
                info[imageId]["index"] = lengths[info[imageId]["file"]] + info[imageId]["idx"]
                del info[imageId]["idx"]
                del info[imageId]["file"]
            with open(os.path.join(self.get_processed_data_dir(), f"{name}_merged_info.json"), "w") as infoOut:
                json.dump(info, infoOut)
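A minimal sketch of how the merged file can be read back. The directory layout and key names mirror the method above; the processed_dir path and the image id "2407890" are invented for illustration:

    import json
    import os

    import h5py

    processed_dir = "data/processed"  # hypothetical stand-in for get_processed_data_dir()
    with open(os.path.join(processed_dir, "spatial_merged_info.json")) as f:
        info = json.load(f)

    with h5py.File(os.path.join(processed_dir, "spatial.h5"), "r") as h5:
        row = info["2407890"]["index"]  # global row computed during the merge
        features = h5["features"][row]  # one (2048, 7, 7) spatial feature map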
Example #2
    def load_json_data(self, mode):
        logger.info(f"Loading {mode} data...")
        if mode == "train":
            root_path = os.path.join(self.get_raw_data_dir(), f'{mode}_all_questions')
            paths = [os.path.join(root_path, fn) for fn in sorted(os.listdir(root_path))]
        else:
            paths = [os.path.join(self.get_raw_data_dir(), f"{mode}_all_questions.json")]

        logger.info(f"Loading JSON...")
        data = {}
        for path in tqdm(paths, desc="Loading json"):
            with open(path) as f:
                data = {**data, **json.load(f)}
        return data
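Hypothetical call sites following the path logic above: "train" merges every shard under train_all_questions/, while any other mode reads a single {mode}_all_questions.json file:

    train_questions = builder.load_json_data("train")  # merges all shards in the directory
    val_questions = builder.load_json_data("val")      # reads val_all_questions.json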
Example #3
    def __init__(self, builder, mode: str):
        super().__init__(builder, mode)

        # lazily populated caches; see the cached properties in Examples #9 and #10
        self._vocab = None
        self._answers = None
        self._image_ids = None
        self._image_data = None

        with open(
                os.path.join(self.processed_data_dir,
                             "spatial_merged_info.json"), "r") as f:
            logger.info("Loading image ids...")
            self.image_ids = {
                key: val['index']
                for key, val in json.load(f).items()
            }

        if self.use_spatial_features:
            # keep the h5 file open for the dataset's lifetime; rows are read lazily
            self.h5_spatial = h5py.File(
                os.path.join(self.processed_data_dir, "spatial.h5"), "r")
            self.image_spatial_features = self.h5_spatial['features']

        if self.use_object_features:
            self.h5_object = h5py.File(
                os.path.join(self.processed_data_dir, "objects.h5"), "r")
            self.image_object_features = self.h5_object['features']
            self.image_object_bboxes = self.h5_object['bboxes']
            with open(
                    os.path.join(self.processed_data_dir,
                                 "objects_merged_info.json"), "r") as f:
                logger.info("Loading object info...")
                self.object_info = json.load(f)

            self.relation_vocab = Vocab.from_file(
                self.builder.relation_name_path)
            self.object_vocab = Vocab.from_file(self.builder.object_name_path)
            self.attribute_vocab = Vocab.from_file(
                self.builder.attribute_name_path)
            self.relation_vocab.init_pretrained_embeddings('glove')
            self.object_vocab.init_pretrained_embeddings('glove')
            self.attribute_vocab.init_pretrained_embeddings('glove')

        if self.use_bert_features:
            self.h5_bert = h5py.File(
                os.path.join(self.processed_data_dir,
                             f"bert_features_{mode}.h5"), "r")
            self.question_bert_outputs = self.h5_bert['outputs']
            self.question_bert_states = self.h5_bert['state']
            self.question_bert_lengths = self.h5_bert['lengths']
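Assuming objects_merged_info.json stores a per-image "index" the way the spatial file does, the handles opened above can be queried directly; the image id below is invented:

    idx = dataset.object_info["2407890"]["index"]
    obj_features = dataset.image_object_features[idx]  # (100, 2048) per the merge spec
    obj_bboxes = dataset.image_object_bboxes[idx]      # (100, 4)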
Example #4
    def write_results_to_file(self,
                              all_predictions: List[Any],
                              output_path: str,
                              output_tag: str,
                              format: Optional[str] = None) -> str:
        os.makedirs(output_path, exist_ok=True)
        # one record per sample, in the order the predictions were produced
        res = [
            dict(questionId=sample.question_id, prediction=self.answers[pred])
            for sample, pred in zip(self.data, all_predictions)
        ]
        path = os.path.join(output_path, output_tag + ".json")
        with open(path, "w") as f:
            json.dump(res, f)
        logger.info("Results written to %s", path)
        return path
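A hedged usage sketch; `predictions` is assumed to hold answer indices aligned with self.data:

    path = dataset.write_results_to_file(predictions, "results", "val_epoch3")
    # -> results/val_epoch3.json: a list of {"questionId": ..., "prediction": ...}
    #    records, the shape consumed by the evaluation script in Example #7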
Example #5
    def process_graphs(self):
        logger.info("Processing scene graphs...")
        for mode in ["train", "val"]:
            with open(os.path.join(self.get_raw_data_dir(), f"{mode}_sceneGraphs.json")) as f:
                data = json.load(f)

            if mode == "train":
                relation_names = {'<oov>'}
                for s in data.values():
                    for obj in s['objects'].values():
                        if 'relations' in obj:
                            relation_names |= set(rela['name'] for rela in obj['relations'])
                with open(self.relation_name_path, "w") as f:
                    f.write('\n'.join(list(relation_names)))
                    logger.info(f"No. relations: {len(relation_names)}")

                object_names = {'<oov>'}
                for s in data.values():
                    for obj in s['objects'].values():
                        object_names.add(obj['name'])
                with open(self.object_name_path, "w") as f:
                    f.write('\n'.join(list(object_names)))
                    logger.info(f"No. object identities: {len(object_names)}")

                attribute_names = {'<oov>'}
                for s in data.values():
                    for obj in s['objects'].values():
                        for attr in obj['attributes']:
                            attribute_names.add(attr)
                with open(self.attribute_name_path, 'w') as f:
                    f.write('\n'.join(list(attribute_names)))
                    logger.info(f"No. object attributes: {len(attribute_names)}")

            outputs = []
            for image_id, s in data.items():
                obj_idx = {obj_id: idx for idx, obj_id in enumerate(s['objects'].keys())}
                for obj_id, obj in s['objects'].items():
                    # one slot per object in the image; None = no relation to that object
                    rela = [None for _ in range(len(obj_idx))]
                    for r in obj.get('relations', []):
                        rela[obj_idx[r['object']]] = r['name']
                    outputs.append([
                        image_id,
                        obj_idx[obj_id],
                        obj['name'],
                        ','.join(obj.get('attributes', [])),
                        ','.join('' if r is None else r for r in rela)
                    ])

            with open(self.get_image_data_path(mode), 'w') as f:
                f.write('\n'.join(['\t'.join([
                    str(cell) for cell in d
                ]) for d in outputs]))
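A sketch of reading the TSV written above, assuming get_image_data_path points at the same file; each row is one object, with attributes and relations as comma-joined fields:

    with open("train_image_data.tsv") as f:  # hypothetical path from get_image_data_path("train")
        for line in f:
            image_id, obj_idx, name, attrs, relas = line.rstrip('\n').split('\t')
            attributes = attrs.split(',') if attrs else []
            relations = relas.split(',')  # aligned with object indices; '' means no edge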
Example #6
    def data(self):
        if self._data is None:
            self._data = []
            with open(self.builder.get_data_path(self.mode)) as f:
                lines = [l for l in f.read().split('\n') if l.strip() != ""]
                logger.info(
                    f"Dataset loaded. Number of samples: {len(lines):,}")

            for line in lines:
                img_path, q, _, a = line.split('\t')
                self._data.append(
                    Sample(image_path=img_path,
                           question=self.vocab.encode_token_list(q.split(' ')),
                           answer=int(a)))

            # self._data.sort(key=lambda d: len(d[1]))
            logger.info("Question length - max: %d - avg: %d",
                        max(len(d[1]) for d in self._data),
                        np.average([len(d[1]) for d in self._data]))
        return self._data
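The Sample container itself is not shown in these snippets; a minimal stand-in matching the keyword arguments used in this example might be (Example #8 uses a different field set):

    from typing import List, NamedTuple

    class Sample(NamedTuple):
        image_path: str
        question: List[int]  # token ids from vocab.encode_token_list
        answer: int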
Example #7
    def run_evaluation_script(
            self,
            result_path,
            **kwargs):
        cmd = [
            "python", os.path.join(self.get_raw_data_dir(), "eval.py"),
            "--tier", "val",
            "--scenes", os.path.join(self.get_raw_data_dir(), "val_sceneGraphs.json"),
            "--choices", os.path.join(self.get_raw_data_dir(), "val_choices.json"),
            "--predictions", result_path,
            "--questions", os.path.join(self.get_raw_data_dir(), "val_all_questions.json")
        ]
        logger.info("Running evaluation script...\n%s", " ".join(cmd))
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        res = out.decode()
        logger.info("Evaluation script output:\n%s", res)
        # eval.py is expected to print "Accuracy: ...%", "Validity: ...%", etc.;
        # Distribution is a raw score rather than a percentage
        return dict(
            accuracy=float(re.search(r"Accuracy: (\d*\.\d*)%", res).group(1)),
            validity=float(re.search(r"Validity: (\d*\.\d*)%", res).group(1)),
            plausibility=float(re.search(r"Plausibility: (\d*\.\d*)%", res).group(1)),
            distribution=float(re.search(r"Distribution: (\d*\.\d*)", res).group(1))
        )
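Hypothetical end-to-end usage, chaining this with the writer from Example #4:

    result_path = dataset.write_results_to_file(predictions, "results", "val_epoch3")
    metrics = builder.run_evaluation_script(result_path)
    print(metrics["accuracy"])  # parsed from eval.py's "Accuracy: ...%" line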
Example #8
    def data(self):
        if self._data is None:
            self._data = []
            logger.info("Loading from %s...",
                        self.builder.get_data_path(self.mode))
            with open(self.builder.get_data_path(self.mode)) as f:
                lines = [l for l in f.read().split('\n') if l.strip() != ""]
            answer_dict = {a: i for i, a in enumerate(self.answers)}
            # configs.size optionally truncates the dataset; `or None` (not -1,
            # which would drop the last line) slices the whole list when unset
            for line in tqdm(lines[:self.configs.size or None],
                             leave=False,
                             desc="Parse dataset"):
                qid, image_id, q, a = line.split('\t')
                self._data.append(
                    Sample(question_id=qid,
                           image_id=image_id,
                           question=self.encode_token_list(q.split(' ')),
                           answer=answer_dict.get(a, -1)))

            # assert all(len(self.answers) > s.answer >= 0 for s in self._data)
            logger.info(f"Dataset loaded. Number of samples: {len(lines):,}")
            logger.info("Question length - max: %d - avg: %d",
                        max(len(s.question) for s in self._data),
                        np.average([len(s.question) for s in self._data]))
        return self._data
Example #9
    def answers(self):
        if self._answers is None:
            # read the newline-separated answer list once, then cache it
            with open(self.builder.answer_path) as f:
                self._answers = f.read().strip().split('\n')
            logger.info("Number of answers: %d", len(self._answers))
        return self._answers
Example #10
    def vocab(self):
        if self._vocab is None:
            self._vocab = Vocab.from_file(self.builder.vocab_path)
            logger.info("Vocab size: %d", len(self._vocab))
        return self._vocab
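Taken together, the two cached properties cover both ends of the pipeline; a hedged sketch, assuming a dataset object exposing both:

    token_ids = dataset.vocab.encode_token_list("what color is the cat".split(' '))
    answer_str = dataset.answers[pred_idx]  # pred_idx: a model's predicted index, as in Example #4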