Example #1
    def __init__(self, __C):
        self.__C = __C

        # Loading all image paths
        # if self.__C.PRELOAD:
        self.img_feat_path_list = []
        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:
            if split in ['train', 'val', 'test']:
                self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH[split] + '*.npz')

        # Loading question word list
        self.stat_ques_list = \
            json.load(open(data_map_vqa_oe['train'], 'r')) + \
            json.load(open(data_map_vqa_oe['val'], 'r')) + \
            json.load(open(data_map_vqa_oe['test'], 'r')) + \
            json.load(open(data_map_vqa_oe['testdev'], 'r'))

        # Loading question and answer list
        self.ques_list = []
        # self.ans_list = []

        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:
            self.ques_list += json.load(open(data_map_vqa_oe[split], 'r'))
            # if __C.RUN_MODE in ['train']:
            #     self.ans_list += json.load(open(data_map_vqa_oe[split], 'r'))['annotations']

        # Define run data size
        # if __C.RUN_MODE in ['train']:
        #     self.data_size = self.ans_list.__len__()
        # else:
        self.data_size = len(self.ques_list)

        print('== Dataset size:', self.data_size)

        # {image id} -> {image feature absolute path}
        self.iid_to_img_feat_path = img_feat_path_load(self.img_feat_path_list)

        # {question id} -> {question}
        self.qid_to_ques = ques_load(self.ques_list, element_name='ques_id')

        # Tokenize
        self.token_to_ix, self.pretrained_emb = tokenize(self.stat_ques_list, __C.USE_GLOVE, element_name='ques')
        self.token_size = len(self.token_to_ix)
        print('== Question token vocab size:', self.token_size)

        self.ans_to_ix, self.ix_to_ans = ans_stat('/ExpData/gwy/vqa/v1/preprocessed/ans_dict.json')
        self.ans_size = len(self.ans_to_ix)
        print('== Answer vocab size (occurring more than {} times):'.format(8), self.ans_size)
        print('Finished!')
        print('')
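
The constructor above relies on helpers such as img_feat_path_load and ques_load that are defined elsewhere. Below is a minimal sketch of what they might look like, assuming the .npz filenames end in the zero-padded image id and that element_name names the id field of each question dict; both signatures are assumptions, not the original implementations.

def img_feat_path_load(path_list):
    # {image id} -> {image feature absolute path}; assumes filenames such as
    # 'COCO_train2014_000000000009.jpg.npz', whose trailing digits are the id.
    iid_to_path = {}
    for path in path_list:
        iid = str(int(path.split('/')[-1].split('_')[-1].split('.')[0]))
        iid_to_path[iid] = path
    return iid_to_path


def ques_load(ques_list, element_name='question_id'):
    # {question id} -> {question dict}; element_name selects the id key
    # ('ques_id' in Example #1, 'question_id' in the other examples).
    return {str(ques[element_name]): ques for ques in ques_list}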
Example #2
    def __init__(self, __C, img_id, input_question, input_question_id):
        self.__C = __C
        self.img_feat_path_list = []
        self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH['train'] +
                                             '*.npz')
        self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH['val'] +
                                             '*.npz')

        # Loading question word list
        self.stat_ques_list = \
            json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']

        # question and answer list
        self.ques_list = [{
            'image_id': img_id,
            'question': input_question,
            'question_id': input_question_id
        }]
        self.ans_list = []

        # define run data size
        self.data_size = len(self.ques_list)
        # print('========== Dataset Size: ', self.data_size)

        # {image_id} -> {image feature absolute path}
        self.iid_to_img_feat_path = img_feat_path_load(self.img_feat_path_list)

        # tokenize
        self.token_to_ix, self.pretrained_emb = tokenize(
            self.stat_ques_list, __C.USE_GLOVE)
        self.token_size = len(self.token_to_ix)

        self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
        self.ans_size = len(self.ans_to_ix)

        print('========== Finished!')
Example #3
    def __init__(self, __C):
        self.__C = __C

        # --------------------------
        # ---- Raw data loading ----
        # --------------------------

        # Loading all image paths
        # if self.__C.PRELOAD:
        self.img_feat_path_list = []
        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:
            if split in ['train']:
                self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH[split] +
                                                     '*.npz')

        print('Length of image features', len(self.img_feat_path_list))

        # if __C.EVAL_EVERY_EPOCH and __C.RUN_MODE in ['train']:
        #     self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH['val'] + '*.npz')

        # else:
        #     self.img_feat_path_list = \
        #         glob.glob(__C.IMG_FEAT_PATH['train'] + '*.npz') + \
        #         glob.glob(__C.IMG_FEAT_PATH['val'] + '*.npz') + \
        #         glob.glob(__C.IMG_FEAT_PATH['test'] + '*.npz')

        # Loading question word list
        # self.stat_ques_list = \
        # json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
        # json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions'] + \
        # json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
        # json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']

        self.stat_ques_list = json.load(open(__C.QUESTION_PATH['train'],
                                             'r'))['questions']

        print('Length of questions', len(self.stat_ques_list))

        # Loading answer word list
        # self.stat_ans_list = \
        #     json.load(open(__C.ANSWER_PATH['train'], 'r'))['annotations'] + \
        #     json.load(open(__C.ANSWER_PATH['val'], 'r'))['annotations']

        self.stat_ans_list = json.load(open(__C.ANSWER_PATH['train'],
                                            'r'))['annotations']

        print('Length of answers', len(self.stat_ans_list))

        # Loading question and answer list
        self.ques_list = []
        self.ans_list = []
        print('Loading the annotation files')
        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        split_list = ['train']  # hard-coded override: only the train split is loaded here
        for split in split_list:
            self.ques_list += json.load(open(__C.QUESTION_PATH[split],
                                             'r'))['questions']
            if __C.RUN_MODE in ['train']:
                self.ans_list += json.load(open(__C.ANSWER_PATH[split],
                                                'r'))['annotations']

        print('Finished loading the annotation files!!!')
        # Define run data size
        if __C.RUN_MODE in ['train']:
            self.data_size = len(self.ans_list)
        else:
            self.data_size = len(self.ques_list)

        print('== Dataset size:', self.data_size)

        # ------------------------
        # ---- Data statistic ----
        # ------------------------

        # {image id} -> {image feature absolute path}
        if self.__C.PRELOAD:
            print('==== Pre-Loading features ...')
            time_start = time.time()
            self.iid_to_img_feat = img_feat_load(self.img_feat_path_list)
            time_end = time.time()
            print('==== Finished in {}s'.format(int(time_end - time_start)))
        else:
            self.iid_to_img_feat_path = img_feat_path_load(
                self.img_feat_path_list)

        # self.ques_list = self.ques_list[:7]
        # {question id} -> {question}
        self.qid_to_ques = ques_load(self.ques_list)

        # Tokenize
        self.token_to_ix, self.pretrained_emb = tokenize(
            self.stat_ques_list, __C.USE_GLOVE)
        self.token_size = len(self.token_to_ix)
        print('== Question token vocab size:', self.token_size)

        # Answers statistic
        # Making the answer dict during training does not guarantee
        # the same order of {ans_to_ix}, so we published our
        # answer dict to ensure that our pre-trained model
        # can be adapted on each machine.

        # Thanks to Licheng Yu (https://github.com/lichengunc)
        # for finding this bug and providing the solution.

        # self.ans_to_ix, self.ix_to_ans = ans_stat(self.stat_ans_list, __C.ANS_FREQ)
        self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
        self.ans_size = len(self.ans_to_ix)
        print('== Answer vocab size (occurring more than {} times):'.format(8),
              self.ans_size)
        print('Finished!')
        print('')
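
The comment block above explains why the answer vocabulary is read from a published answer_dict.json instead of being rebuilt at training time. A minimal sketch of such a loader, assuming the JSON file stores the pair [ans_to_ix, ix_to_ans]; the on-disk layout is an assumption.

import json


def ans_stat(json_file):
    # Load the published answer dictionary so every machine gets an identical
    # answer ordering; returns ({answer: index}, {index: answer}).
    ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))
    return ans_to_ix, ix_to_ans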
Example #4
    def __init__(self, __C):
        self.__C = __C

        # --------------------------
        # ---- Raw data loading ----
        # --------------------------

        # Loading all image paths
        self.img_feat_path_list = \
            glob.glob(__C.IMG_FEAT_PATH['train'] + '*.npz') + \
            glob.glob(__C.IMG_FEAT_PATH['val'] + '*.npz') + \
            glob.glob(__C.IMG_FEAT_PATH['test'] + '*.npz')

        # Loading question word list
        self.stat_ques_list = \
            json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']

        # Loading answer word list
        self.stat_ans_list = \
            json.load(open(__C.ANSWER_PATH['train'], 'r'))['annotations'] + \
            json.load(open(__C.ANSWER_PATH['val'], 'r'))['annotations']

        # Loading question and answer list
        self.ques_list = []
        self.ans_list = []

        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split_ in split_list:
            self.ques_list += json.load(open(__C.QUESTION_PATH[split_],
                                             'r'))['questions']
            if __C.RUN_MODE in ['train']:
                self.ans_list += json.load(open(__C.ANSWER_PATH[split_],
                                                'r'))['annotations']

        # Define run data size
        if __C.RUN_MODE in ['train']:
            self.data_size = len(self.ans_list)
        else:
            self.data_size = len(self.ques_list)

        print(' ========== run data size:', self.data_size)

        # ------------------------
        # ---- Data statistic ----
        # ------------------------

        # {image id} -> {image feature absolute path}
        self.iid_to_img_feat_path = img_feat_path_load(self.img_feat_path_list)

        # {question id} -> {question}
        self.qid_to_ques = ques_load(self.ques_list)

        # Tokenize
        self.token_to_ix, self.pretrained_emb = tokenize(
            self.stat_ques_list, __C.USE_GLOVE)
        self.token_size = len(self.token_to_ix)
        print(' ========== question token size:', self.token_size)

        # Answers statistic
        self.ans_to_ix, self.ix_to_ans = ans_stat(self.stat_ans_list,
                                                  __C.ANS_FREQ)
        self.ans_size = len(self.ans_to_ix)
        print(
            ' ========== answer vocab size (occurring more than {} times):'.format(
                __C.ANS_FREQ), self.ans_size)
        print('Loading finished !!!')
        print('')
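
Example #4 builds the answer vocabulary on the fly from the training annotations instead of loading a published dictionary, keeping only answers that occur more than __C.ANS_FREQ times. A hypothetical sketch of that variant of ans_stat; the 'multiple_choice_answer' field name and the absence of answer normalization are assumptions.

def ans_stat(stat_ans_list, ans_freq):
    # Count every answer string, then keep the ones above the frequency
    # threshold and assign them consecutive indices.
    freq_dict = {}
    for ans in stat_ans_list:
        ans_str = ans['multiple_choice_answer'].lower()
        freq_dict[ans_str] = freq_dict.get(ans_str, 0) + 1

    ans_to_ix, ix_to_ans = {}, {}
    for ans_str, freq in freq_dict.items():
        if freq > ans_freq:
            ix = len(ans_to_ix)
            ans_to_ix[ans_str] = ix
            ix_to_ans[ix] = ans_str
    return ans_to_ix, ix_to_ans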
Example #5
    def __init__(self, __C):
        self.__C = __C

        # --------------------------
        # ---- Raw data loading ----
        # --------------------------

        # Loading all image paths
        # if self.__C.PRELOAD:
        self.img_feat_path_list = []
        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:
            if split in ['train', 'val', 'test']:
                self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH[split] +
                                                     '*.npz')

        # if __C.EVAL_EVERY_EPOCH and __C.RUN_MODE in ['train']:
        #     self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH['val'] + '*.npz')

        # else:
        #     self.img_feat_path_list = \
        #         glob.glob(__C.IMG_FEAT_PATH['train'] + '*.npz') + \
        #         glob.glob(__C.IMG_FEAT_PATH['val'] + '*.npz') + \
        #         glob.glob(__C.IMG_FEAT_PATH['test'] + '*.npz')

        # Loading question word list
        self.stat_ques_list = \
            json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
            json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']

        # Loading answer word list
        # self.stat_ans_list = \
        #     json.load(open(__C.ANSWER_PATH['train'], 'r'))['annotations'] + \
        #     json.load(open(__C.ANSWER_PATH['val'], 'r'))['annotations']

        # Loading question and answer list
        self.ques_list = []
        self.ans_list = []

        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:
            self.ques_list += json.load(open(__C.QUESTION_PATH[split],
                                             'r'))['questions']
            if __C.RUN_MODE in ['train']:
                self.ans_list += json.load(open(__C.ANSWER_PATH[split],
                                                'r'))['annotations']

        # Define run data size
        if __C.RUN_MODE in ['train']:
            self.data_size = len(self.ans_list)
        else:
            self.data_size = len(self.ques_list)

        print('== Dataset size:', self.data_size)

        # ------------------------
        # ---- Data statistic ----
        # ------------------------

        # {image id} -> {image feature absolute path}
        if self.__C.PRELOAD:
            print('==== Pre-Loading features ...')
            time_start = time.time()
            self.iid_to_img_feat = img_feat_load(self.img_feat_path_list)
            time_end = time.time()
            print('==== Finished in {}s'.format(int(time_end - time_start)))
        else:
            self.iid_to_img_feat_path = img_feat_path_load(
                self.img_feat_path_list)

        # {question id} -> {question}
        self.qid_to_ques = ques_load(self.ques_list)

        # Tokenize
        self.token_to_ix, self.pretrained_emb = tokenize(
            self.stat_ques_list, __C.USE_GLOVE)
        # ix_to_token: inverse mapping {index} -> {token}
        self.ix_to_token = {v: k for k, v in self.token_to_ix.items()}
        self.token_size = len(self.token_to_ix)
        print('== Question token vocab size:', self.token_size)

        # self.ans_to_ix, self.ix_to_ans = ans_stat(self.stat_ans_list, __C.ANS_FREQ)
        self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
        self.ans_size = len(self.ans_to_ix)
        print('== Answer vocab size (occurring more than {} times):'.format(8),
              self.ans_size)
        print('Finished!')
        print('')
    def __init__(self, __C):
        """
        :param __C: 配置信息
        """
        self.__C = __C
        # Load raw data
        self.img_feat_path_list = []
        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        # split_list = ['train', 'val', 'vg']
        for split in split_list:
            if split in ['train']:
                self.img_feat_path_list += glob.glob(__C.IMG_FEAT_PATH[split] +
                                                     '*.npz')

        self.stat_ques_list = json.load(open(__C.QUESTION_PATH['train'],
                                             'r'))['questions']
        # json.load(open(__C.QUESTION_PATH['train'], 'r'))['questions'] + \
        #json.load(open(__C.QUESTION_PATH['val'], 'r'))['questions']
        # json.load(open(__C.QUESTION_PATH['test'], 'r'))['questions'] + \
        # json.load(open(__C.QUESTION_PATH['vg'], 'r'))['questions']

        self.ques_list = []
        self.ans_list = []  # contents of the annotations files are read into ans_list

        split_list = __C.SPLIT[__C.RUN_MODE].split('+')
        for split in split_list:

            self.ques_list += json.load(open(__C.QUESTION_PATH[split],
                                             'r'))['questions']
            if __C.RUN_MODE in ['train']:
                self.ans_list += json.load(open(__C.ANSWER_PATH[split],
                                                'r'))['annotations']

        # Define run data size (= len(ans_list) in train mode)
        if __C.RUN_MODE in ['train']:
            self.data_size = len(self.ans_list)
        else:
            self.data_size = len(self.ques_list)

        print('== Dataset size:', self.data_size)

        # {image id} -> {image feature absolute path}
        if self.__C.PRELOAD:  # PRELOAD is False here, so the else branch runs
            print('==== Pre-Loading features ...')
            time_start = time.time()
            self.iid_to_img_feat = img_feat_load(self.img_feat_path_list)
            time_end = time.time()
            print('==== Finished in {}s'.format(int(time_end - time_start)))
        else:
            # Call img_feat_path_load from data_utils.py to index the image feature files
            # e.g. {'9': 'COCO_train2015_00000000009.jpg.npz', ...}
            self.iid_to_img_feat_path = img_feat_path_load(
                self.img_feat_path_list)
        # Call ques_load from data_utils.py to index the questions
        # e.g. {'458752000': {'image_id': '', 'question': '', 'question_id': '458752000'}, ...}
        self.qid_to_ques = ques_load(self.ques_list)

        # Call tokenize from data_utils.py
        # token_to_ix: each word appearing in the questions, recorded once
        #   (e.g. 'what' occurs in many questions but gets a single entry)
        # pretrained_emb: the word embeddings
        # token_size: vocabulary size (18405 here)
        self.token_to_ix, self.pretrained_emb = tokenize(
            self.stat_ques_list, __C.USE_GLOVE)
        self.token_size = len(self.token_to_ix)
        print('== Question token vocab size:', self.token_size)
        # Call ans_stat from data_utils.py
        self.ans_to_ix, self.ix_to_ans = ans_stat('core/data/answer_dict.json')
        self.ans_size = len(self.ans_to_ix)
        print('== Answer vocab size (occurring more than {} times):'.format(8),
              self.ans_size)
        print('Finished!')
        print('')
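
The comments in the last example describe tokenize as building a one-entry-per-word vocabulary over all questions, optionally paired with GloVe vectors. A minimal sketch of such a function; the use of spaCy's en_vectors_web_lg package for the GloVe vectors, the element_name default, and the punctuation stripping are assumptions.

import re

import numpy as np


def tokenize(stat_ques_list, use_glove, element_name='question'):
    # 'PAD' and 'UNK' take the first two indices; every other word that
    # appears in the questions is added exactly once.
    token_to_ix = {'PAD': 0, 'UNK': 1}
    pretrained_emb = []
    spacy_tool = None
    if use_glove:
        import en_vectors_web_lg  # assumed GloVe source (spaCy vectors package)
        spacy_tool = en_vectors_web_lg.load()
        pretrained_emb.append(spacy_tool('PAD').vector)
        pretrained_emb.append(spacy_tool('UNK').vector)

    for ques in stat_ques_list:
        words = re.sub(r"[.,'?!()*#:;]", '', ques[element_name].lower()).split()
        for word in words:
            if word not in token_to_ix:
                token_to_ix[word] = len(token_to_ix)
                if use_glove:
                    pretrained_emb.append(spacy_tool(word).vector)

    return token_to_ix, np.array(pretrained_emb)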