Example #1
0
def main(args):
    """Extract pool5/fc7 features for every detection box and store them
    in an HDF5 file under cache/feats/<dataset_splitBy>/mrcn/.

    Expects args to carry: dataset, splitBy, net_name, imdb_name, tag,
    plus whatever inference_no_imdb.Inference needs.
    """
    dataset_splitBy = args.dataset + '_' + args.splitBy
    feats_root = osp.join('cache/feats/', dataset_splitBy)
    if not osp.isdir(feats_root):
        os.makedirs(feats_root)

    # Image Directory (coco-style datasets share the train2014 folder)
    if any(tag in dataset_splitBy for tag in ('coco', 'combined')):
        IMAGE_DIR = 'data/images/mscoco/images/train2014'
    elif 'clef' in dataset_splitBy:
        IMAGE_DIR = 'data/images/saiapr_tc-12'
    elif 'sunspot' in dataset_splitBy:
        IMAGE_DIR = 'data/images/SUNRGBD'
    else:
        print('No image directory prepared for ', args.dataset)
        sys.exit(0)

    # load dataset and the detections produced by the detector run
    data_json = osp.join('cache/prepro', dataset_splitBy, 'data.json')
    data_h5 = osp.join('cache/prepro', dataset_splitBy, 'data.h5')
    dets_json = osp.join(
        'cache/detections', dataset_splitBy,
        '%s_%s_%s_dets.json' % (args.net_name, args.imdb_name, args.tag))
    loader = DetsLoader(data_json, data_h5, dets_json)
    images = loader.images
    dets = loader.dets
    num_dets = len(dets)
    # every detection must be reachable from exactly one image
    assert sum(len(image['det_ids']) for image in images) == num_dets

    # load mrcn model
    mrcn = inference_no_imdb.Inference(args)

    # output feats_h5 path
    file_name = '%s_%s_%s_det_feats.h5' % (args.net_name, args.imdb_name,
                                           args.tag)
    feats_h5 = osp.join('cache/feats', dataset_splitBy, 'mrcn', file_name)

    f = h5py.File(feats_h5, 'w')
    # res101 widths: fc7 is 2048-d, pool5 is 1024-d
    fc7_set = f.create_dataset('fc7', (num_dets, 2048), dtype=np.float32)
    pool5_set = f.create_dataset('pool5', (num_dets, 1024), dtype=np.float32)

    # extract: reuse the cached per-image head (conv) features
    feats_dir = '%s_%s_%s' % (args.net_name, args.imdb_name, args.tag)
    head_feats_dir = osp.join('cache/feats/', dataset_splitBy, 'mrcn',
                              feats_dir)
    for idx, image in enumerate(images):
        net_conv, im_info = image_to_head(head_feats_dir, image['image_id'])
        for det_id in image['det_ids']:
            det = loader.Dets[det_id]
            det_pool5, det_fc7 = det_to_pool5_fc7(mrcn, det, net_conv, im_info)
            row = det['h5_id']
            fc7_set[row] = det_fc7.data.cpu().numpy()
            pool5_set[row] = det_pool5.data.cpu().numpy()
        if idx % 20 == 0:
            print('%s/%s done.' % (idx + 1, len(images)))

    f.close()
    print('%s written.' % feats_h5)
Example #2
0
def main(args):
    """Extract pool5/fc7 features for every ground-truth annotation box
    and store them in an HDF5 file under cache/feats/<dataset_splitBy>/mrcn/.

    Expects args to carry: dataset, splitBy, net_name, imdb_name, tag,
    plus whatever inference_no_imdb.Inference needs.
    """
    dataset_splitBy = args.dataset + '_' + args.splitBy
    if not osp.isdir(osp.join('cache/feats/', dataset_splitBy)):
        os.makedirs(osp.join('cache/feats/', dataset_splitBy))

    # Image Directory
    # BUGFIX: the original condition was `if 'coco' or 'combined' in ...`,
    # which is always truthy ('coco' is a non-empty string), making the
    # 'clef' branch and the error exit unreachable. Test each substring.
    if 'coco' in dataset_splitBy or 'combined' in dataset_splitBy:
        IMAGE_DIR = 'data/images/mscoco/images/train2014'
    elif 'clef' in dataset_splitBy:
        IMAGE_DIR = 'data/images/saiapr_tc-12'
    else:
        print('No image directory prepared for ', args.dataset)
        sys.exit(0)

    # load dataset
    data_json = osp.join('cache/prepro', dataset_splitBy, 'data.json')
    data_h5 = osp.join('cache/prepro', dataset_splitBy, 'data.h5')
    loader = Loader(data_json, data_h5)
    images = loader.images
    anns = loader.anns
    num_anns = len(anns)
    # every annotation must be reachable from exactly one image
    assert sum(len(image['ann_ids']) for image in images) == num_anns

    # load mrcn model
    mrcn = inference_no_imdb.Inference(args)

    # feats_h5 output path
    file_name = '%s_%s_%s_ann_feats.h5' % (args.net_name, args.imdb_name,
                                           args.tag)
    feats_h5 = osp.join('cache/feats', dataset_splitBy, 'mrcn', file_name)

    f = h5py.File(feats_h5, 'w')
    # res101 widths: pool5 is 1024-d, fc7 is 2048-d
    pool5_set = f.create_dataset('pool5', (num_anns, 1024), dtype=np.float32)
    fc7_set = f.create_dataset('fc7', (num_anns, 2048), dtype=np.float32)

    # extract: reuse the cached per-image head (conv) features
    feats_dir = '%s_%s_%s' % (args.net_name, args.imdb_name, args.tag)
    head_feats_dir = osp.join('cache/feats/', dataset_splitBy, 'mrcn',
                              feats_dir)
    for i, image in enumerate(images):
        image_id = image['image_id']
        net_conv, im_info = image_to_head(head_feats_dir, image_id)
        ann_ids = image['ann_ids']
        for ann_id in ann_ids:
            ann = loader.Anns[ann_id]
            ann_pool5, ann_fc7 = ann_to_pool5_fc7(mrcn, ann, net_conv, im_info)
            ann_h5_id = ann['h5_id']
            pool5_set[ann_h5_id] = ann_pool5.data.cpu().numpy()
            fc7_set[ann_h5_id] = ann_fc7.data.cpu().numpy()
        if i % 20 == 0:
            print('%s/%s done.' % (i + 1, len(images)))

    f.close()
    print('%s written.' % feats_h5)
Example #3
0
 def prepare_mrcn(self, head_feats_dir, args):
     """Set up the Mask R-CNN feature extractor for this instance.

     Arguments:
       head_feats_dir: cache/feats/dataset_splitBy/net_imdb_tag, the
         directory holding the pre-computed per-image conv-net feats.
       args: namespace with imdb_name, net_name, iters, tag.
     """
     self.head_feats_dir = head_feats_dir
     self.mrcn = inference_no_imdb.Inference(args)
     # only the res101 backbone is supported; its feature widths are fixed
     assert args.net_name == 'res101'
     self.pool5_dim, self.fc7_dim = 1024, 2048