Example #1
import json
import os
import os.path as osp

from refer import REFER  # pyutils/refer must be on sys.path (see commented lines below)


def main(params):
    dataset_splitBy = params['dataset'] + '_' + params['splitBy']
    if not osp.isdir('cache/parsed_sents/' + dataset_splitBy):
        os.makedirs('cache/parsed_sents/' + dataset_splitBy)
    # anns = torch.load(params['data_root'])
    # sents = []
    # for im in anns:
    # 	masks = anns[im]
    # 	for mask_pos in masks:
    # 		mask = masks[mask_pos]
    # 		mask_sents = []
    # 		for sent in mask['sentences']:
    # 			mask_sents.append({'sent_id': (im, mask_pos), 'sent': sent,
    # 			                   'raw': sent, 'tokens': sent.split()})
    # 		sents += mask_sents
    # 		mask['sentences'] = mask_sents
    # load refer
    # sys.path.insert(0, 'pyutils/refer')
    # from refer import REFER
    refer = REFER(params['data_root'], params['dataset'], params['splitBy'])

    # parse sents
    sents = list(refer.sents.values())
    parse_sents(sents, params)

    # save
    with open(osp.join('cache/parsed_sents/' + dataset_splitBy, 'sents.json'),
              'w') as io:
        json.dump(sents, io)
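
For reference, a minimal sketch of how this entry point might be invoked; the original script builds `params` with argparse, and the values below are placeholder assumptions:

if __name__ == '__main__':
    # hypothetical arguments; point data_root at the directory holding the REFER annotations
    main({'data_root': 'data', 'dataset': 'refcoco', 'splitBy': 'unc'})
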
Example #2
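    # Method of a dataset preprocessing class; relies on module-level imports of
    # os, os.path as osp, cv2, numpy as np, torch, tqdm,
    # pycocotools.mask as cocomask, and REFER.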
    def process_coco(self, setname, dataset_folder):
        split_dataset = []
        vocab_file = osp.join(self.split_dir, 'vocabulary_Gref.txt')

        refer = REFER(self.dataset_root,
                      **(self.SUPPORTED_DATASETS[self.dataset]['params']))

        refs = [
            ref for ref in refer.refs.values() if ref['split'] == setname
        ]

        refs = sorted(refs, key=lambda x: x['file_name'])

        for i, k in enumerate(list(refer.Cats.keys())):
            self.cat_to_id[k] = i

        if len(self.corpus) == 0:
            print('Saving dataset corpus dictionary...')
            corpus_file = osp.join(self.split_root, self.dataset, 'corpus.pth')
            self.corpus.load_file(vocab_file)
            torch.save(self.corpus, corpus_file)

        if not osp.exists(self.mask_dir):
            os.makedirs(self.mask_dir)
        cats = []
        for ref in tqdm.tqdm(refs):
            img_filename = 'COCO_train2014_{0}.jpg'.format(
                str(ref['image_id']).zfill(12))
            if osp.exists(osp.join(self.im_dir, img_filename)):
                h, w, _ = cv2.imread(osp.join(self.im_dir, img_filename)).shape
                seg = refer.anns[ref['ann_id']]['segmentation']
                bbox = refer.anns[ref['ann_id']]['bbox']
                area = refer.anns[ref['ann_id']]['area']
                cat = ref['category_id']
                cats.append(cat)
                # print(refer.Cats[ref['category_id']])
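                # convert the polygon segmentation to RLE, then collapse all parts into one binary mask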
                rle = cocomask.frPyObjects(seg, h, w)
                mask = np.max(cocomask.decode(rle), axis=2).astype(np.float32)
                mask = torch.from_numpy(mask)
                mask_file = str(ref['ann_id']) + '.pth'
                mask_filename = osp.join(self.mask_dir, mask_file)
                if not osp.exists(mask_filename):
                    torch.save(mask, mask_filename)
                for sentence in ref['sentences']:
                    split_dataset.append(
                        (img_filename, mask_file, bbox, sentence['sent'], area,
                         self.cat_to_id[cat]))

        output_file = '{0}_{1}.pth'.format(self.dataset, setname)
        torch.save(split_dataset, osp.join(dataset_folder, output_file))
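
Each entry of the saved split is the tuple (image filename, mask filename, bbox, sentence, area, category index) built above; a minimal read-back sketch with placeholder paths:

import os.path as osp
import torch

# hypothetical locations; mirror the dataset_folder and self.mask_dir used above
entries = torch.load(osp.join('data/refcoco', 'refcoco_train.pth'))
img_filename, mask_file, bbox, sent, area, cat_id = entries[0]
mask = torch.load(osp.join('data/refcoco/masks', mask_file))  # float32 H x W mask tensor
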
Example #3
import json
import os
import os.path as osp

import h5py
from refer import REFER  # pyutils/refer must be on sys.path (see commented lines below)


def main(params):

    # dataset_splitBy
    data_root = params['data_root']
    dataset = params['dataset']
    splitBy = params['splitBy']

    # max_length
    if params['max_length'] is None:
        if params['dataset'] in ['refcoco', 'refclef', 'refcoco+']:
            params['max_length'] = 10
            params['topK'] = 50
        elif params['dataset'] in ['refcocog']:
            params['max_length'] = 20
            params['topK'] = 50
        else:
            raise NotImplementedError

    # mkdir and write json file
    if not osp.isdir(osp.join('cache/prepro', dataset + '_' + splitBy)):
        os.makedirs(osp.join('cache/prepro', dataset + '_' + splitBy))

    # load refer
    # sys.path.insert(0, 'pyutils/refer')
    # from refer import REFER
    refer = REFER(data_root, dataset, splitBy)

    # create vocab
    vocab, sentToFinal = build_vocab(refer, params)
    itow = {i: w for i, w in enumerate(vocab)}
    wtoi = {w: i for i, w in enumerate(vocab)}

    # check sentence length
    check_sentLength(sentToFinal)

    # create attribute vocab
    att2cnt, ref_to_att_wds = build_att_vocab(refer, params,
                                              ['r1', 'r2', 'r7'])
    itoa = {i: a for i, a in enumerate(att2cnt.keys())}
    atoi = {a: i for i, a in enumerate(att2cnt.keys())}

    # prepare refs, images, anns, sentences
    # and write json
    refs, images, anns, sentences = prepare_json(refer, sentToFinal,
                                                 ref_to_att_wds, params)
    output_json = osp.join('cache/prepro', dataset + '_' + splitBy,
                           params['output_json'])
    with open(output_json, 'w') as io:
        json.dump(
            {
                'refs': refs,
                'images': images,
                'anns': anns,
                'sentences': sentences,
                'word_to_ix': wtoi,
                'att_to_ix': atoi,
                'att_to_cnt': att2cnt,
                'cat_to_ix': {cat_name: cat_id
                              for cat_id, cat_name in refer.Cats.items()},
                'label_length': params['max_length'],
            }, io)
    print('%s written.' % output_json)

    # write h5 file which contains /labels
    output_h5 = osp.join('cache/prepro', dataset + '_' + splitBy,
                         params['output_h5'])
    f = h5py.File(output_h5, 'w')
    L = encode_captions(sentences, wtoi, params)  # encode_captions is defined elsewhere in this script
    f.create_dataset("labels", dtype='int32', data=L)
    f.close()
    print('%s written.' % output_h5)
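
The cached files can then be read back for training; a minimal sketch assuming the refcoco_unc split and hypothetical output names (substitute whatever output_json / output_h5 were set to):

import json
import h5py

with open('cache/prepro/refcoco_unc/data.json') as io:
    info = json.load(io)
word_to_ix = info['word_to_ix']
with h5py.File('cache/prepro/refcoco_unc/data.h5', 'r') as f:
    labels = f['labels'][...]  # int32 matrix of word indices, one row per sentence, width label_length
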