Example #1
0
    def __init__(self, scenes_json, questions_json, image_root, image_transform, vocab_json, question_transform=None, incl_scene=True):
        """Dataset over CLEVR-style scenes and questions, with a vocab.

        Args:
            scenes_json: path to the scenes annotation file.
            questions_json: a single path, or a tuple/list of paths whose
                question lists are concatenated in order.
            image_root: root directory of the images.
            image_transform: transform applied to each image.
            vocab_json: path to a vocab file, or None to build the vocab
                from this dataset via ``gen_vocab``.
            question_transform: optional transform applied to questions.
            incl_scene: whether scene annotations are included in samples.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.questions_json = questions_json
        self.image_root = image_root
        self.image_transform = image_transform
        self.vocab_json = vocab_json
        self.question_transform = question_transform
        self.incl_scene = incl_scene

        logger.info('Loading scenes from: "{}".'.format(self.scenes_json))
        self.scenes = io.load_json(self.scenes_json)['scenes']

        # Multiple question files are concatenated in the order given.
        if isinstance(self.questions_json, (tuple, list)):
            all_questions = []
            for filename in self.questions_json:
                logger.info('Loading questions from: "{}".'.format(filename))
                all_questions.extend(io.load_json(filename)['questions'])
            self.questions = all_questions
        else:
            logger.info('Loading questions from: "{}".'.format(self.questions_json))
            self.questions = io.load_json(self.questions_json)['questions']

        if self.vocab_json is None:
            logger.info('Building the vocab.')
            self.vocab = gen_vocab(self)
        else:
            logger.info('Loading vocab from: "{}".'.format(self.vocab_json))
            self.vocab = Vocab.from_json(self.vocab_json)
Example #2
0
def main():
    """Evaluate per-scene predictions against ground-truth scenes.

    Loads scenes and predictions from the paths in the global ``args``,
    then runs ``test`` on every object of every predicted scene,
    accumulating metrics in a ``GroupMeters`` and printing a summary.
    """
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    # Predictions may be stored as a dict keyed by scene id; use the values.
    if isinstance(preds, dict):
        preds = list(preds.values())

    meter = GroupMeters()

    # Predictions are aligned with scenes by index.
    for i, pred in tqdm_gofor(preds, mininterval=0.5):
        scene = scenes[i]
        for j in range(len(scene['objects'])):
            test(j, scene['objects'], pred, meter)

    print(meter.format_simple('Results:', compressed=False))
    def __init__(self,
                 scenes_json,
                 features_h5=None,
                 image_root=None,
                 image_transform=None,
                 question_per_image=20,
                 balance_attribute=True,
                 balance_answer=True):
        """Dataset over scene annotations with optional precomputed features.

        Args:
            scenes_json: path to the annotation file (expects an 'anns' key).
            features_h5: optional HDF5 file of per-image features; must
                contain exactly one entry per scene.
            image_root: root directory of the images.
            image_transform: transform applied to each image.
            question_per_image: number of questions generated per image.
            balance_attribute: whether questions are balanced over attributes.
            balance_answer: whether questions are balanced over answers.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.features_h5 = features_h5
        self.image_root = image_root
        self.image_transform = image_transform
        self.question_per_image = question_per_image
        self.balance_attribute = balance_attribute
        self.balance_answer = balance_answer

        self.scenes = io.load_json(self.scenes_json)['anns']
        if self.features_h5 is not None:
            self.features = io.open_h5(features_h5, 'r')['features']
            # Features must align one-to-one with the scene annotations.
            assert len(self.features) == len(self.scenes)

        # TODO(Jiayuan Mao @ 04/10): no vocab for CCG models.
        self.vocab = None

        self._gen_data()
Example #4
0
def main():
    """Evaluate per-concept predictions against ground-truth objects.

    Flattens all objects and concept scores across scenes, then for each
    concept checks whether a positive score agrees with the object's
    ground-truth attribute value, accumulating overall and per-concept
    accuracy in a ``GroupMeters``.
    """
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    # Predictions may be stored as a dict keyed by scene id; use the values.
    if isinstance(preds, dict):
        preds = list(preds.values())
    meter = GroupMeters()

    flattened_objs = [o for s in scenes for o in s['objects']]
    flattened_preds = {
        k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
        for k in preds[0]
    }

    # NOTE(review): loop variable renamed (it was `preds`, shadowing the
    # outer predictions list) and the agreement expression hoisted so the
    # two meter updates cannot drift apart.
    for k, scores in flattened_preds.items():
        kk = def_.word2lemma.get(k, k)
        for i, o in tqdm_gofor(flattened_objs,
                               desc='{}(lemma: {})'.format(k, kk),
                               leave=False):
            correct = (scores[i] > 0) == (kk == o[def_.concept2attribute[kk]])
            meter.update('acc', correct)
            meter.update(f'acc/{k}', correct)
    print(meter.format_simple('Results:', compressed=False))
Example #5
0
    def __init__(
        self,
        scenes_json,
        questions_json,
        image_root,
        image_transform,
        depth_root,
        vocab_json,
        question_transform=None,
        incl_scene=True,
    ):
        """Dataset over scenes, questions and depth maps, with a vocab.

        Args:
            scenes_json: path to the scenes annotation file.
            questions_json: a single path, or a tuple/list of paths whose
                question lists are concatenated in order.
            image_root: root directory of the images.
            image_transform: transform applied to each image.
            depth_root: root directory of the depth maps.
            vocab_json: path to a vocab file, or None to build the vocab
                from this dataset and dump it next to the questions file.
            question_transform: optional transform applied to questions.
            incl_scene: whether scene annotations are included in samples.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.questions_json = questions_json
        self.image_root = image_root
        self.image_transform = image_transform
        self.depth_root = depth_root
        self.vocab_json = vocab_json
        self.question_transform = question_transform

        self.incl_scene = incl_scene

        logger.info('Loading scenes from: "{}".'.format(self.scenes_json))
        self.scenes = io.load_json(self.scenes_json)["scenes"]
        if isinstance(self.questions_json, (tuple, list)):
            self.questions = list()
            for filename in self.questions_json:
                logger.info('Loading questions from: "{}".'.format(filename))
                self.questions.extend(io.load_json(filename)["questions"])
        else:
            logger.info('Loading questions from: "{}".'.format(self.questions_json))
            self.questions = io.load_json(self.questions_json)["questions"]

        if self.vocab_json is not None:
            logger.info('Loading vocab from: "{}".'.format(self.vocab_json))
            self.vocab = Vocab.from_json(self.vocab_json)
        else:
            logger.info("Building the vocab.")
            self.vocab = gen_vocab(self)
            # Dump the generated vocab next to the questions file so later
            # runs can reload it.  ``questions_json`` may be a tuple/list
            # (handled above); anchor on its first entry in that case —
            # the original code called .split() on the list and crashed.
            anchor = (
                questions_json[0]
                if isinstance(questions_json, (tuple, list))
                else questions_json
            )
            vocab_file = anchor.split("/")
            vocab_file[-1] = "vocab.json"
            vocab_file = "/".join(vocab_file)
            self.vocab.dump_json(vocab_file)
Example #6
0
    def __init__(self, scenes_json, image_root, image_transform, incl_scene=True):
        """Scene-only dataset: stores image settings and loads annotations.

        Args:
            scenes_json: path to the scenes annotation file.
            image_root: root directory of the images.
            image_transform: transform applied to each image.
            incl_scene: whether scene annotations are included in samples.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.incl_scene = incl_scene
        self.image_root = image_root
        self.image_transform = image_transform

        logger.info('Loading scenes from: "{}".'.format(self.scenes_json))
        self.scenes = io.load_json(self.scenes_json)['scenes']
Example #7
0
 def load_files(self):
     """Load captions and precomputed image embeddings from disk.

     In SAMPLE mode, records the indices of captions that have at least
     one replacement; in the other modes, builds the full list of
     (caption index, replacement) pairs for the mode's replacement kind.
     """
     print('Loading captions and precomputed image embeddings')
     self.image_embeddings = load(self.image_embeddings)
     self.captions = load_json(self.captions)
     assert len(self.captions) == len(self.image_embeddings)

     if self.mode is CompletionDatasetMode.SAMPLE:
         self.non_empty_inds = [
             i for i, c in enumerate(self.captions) if len(c['replace']) > 0
         ]
         return

     # Select which replacement list each caption contributes.
     if self.mode is CompletionDatasetMode.ALL:
         key = 'replace'
     elif self.mode is CompletionDatasetMode.NOUN:
         key = 'replace_noun'
     elif self.mode is CompletionDatasetMode.PREP:
         key = 'replace_prep'
     self.all_inds = [
         (i, r) for i, c in enumerate(self.captions) for r in c[key]
     ]
Example #8
0
File: eval.py  Project: lilujunai/VCML
def main():
    """Evaluate flattened per-object predictions on at most 1000 scenes.

    Loads scenes and predictions from the paths in the global ``args``,
    flattens objects and concept scores across scenes, and runs ``test``
    per object, accumulating metrics in a ``GroupMeters``.
    """
    scenes = io.load_json(args.scene_json)['scenes']
    preds = io.load(args.preds_json)
    # Predictions may be stored as a dict keyed by scene id; use the values.
    if isinstance(preds, dict):
        preds = list(preds.values())
    # Evaluate only the first 1000 scenes to keep the run fast.
    scenes = scenes[:1000]
    preds = preds[:1000]

    flattened_objs = [o for s in scenes for o in s['objects']]
    flattened_preds = {
        k: np.concatenate([np.array(p[k]) for p in preds], axis=0)
        for k in preds[0]
    }
    meter = GroupMeters()

    for i, obj in tqdm_gofor(flattened_objs, mininterval=0.5):
        test(i, flattened_objs, flattened_preds, meter)

    print(meter.format_simple('Results:', compressed=False))
Example #9
0
    def __init__(self, scenes_json, image_root, depth_root, incl_scene=True):
        """Scene dataset with depth maps and a fixed built-in image transform.

        Args:
            scenes_json: path to the scenes annotation file.
            image_root: root directory of the RGB images.
            depth_root: root directory of the depth maps.
            incl_scene: whether scene annotations are included in samples.
        """
        super().__init__()

        self.scenes_json = scenes_json
        self.image_root = image_root
        self.depth_root = depth_root
        self.incl_scene = incl_scene

        logger.info('Loading scenes from: "{}".'.format(self.scenes_json))
        self.scenes = io.load_json(self.scenes_json)["scenes"]

        # Bbox-aware transforms; imported here rather than at module level.
        import jactorch.transforms.bbox as T

        self.image_transform = T.Compose([
            T.ToTensor(),
            # Shift pixel values to be centered at zero; the bbox target
            # passes through unchanged.
            T.Lambda(lambda x, y: (x - 0.5, y)),
        ])
Example #10
0
 def check_json_consistency(self, json_file):
     """Return True iff every (word, index) pair of this vocab also appears in *json_file*."""
     rhs = io.load_json(json_file)
     return all(k in rhs and rhs[k] == v for k, v in self.word2idx.items())
Example #11
0
 def from_json(cls, json_file):
     """Alternate constructor: build a vocab instance from a JSON file."""
     data = io.load_json(json_file)
     return cls(data)