def build_db(fn_pattern='.jpg'):

    src_img_dir = os.path.join(Pascal3D_root, 'Images')
    #
    try: os.makedirs(db_path)
    except OSError: pass  # directory may already exist
    imdb = ImageData_lmdb(db_path, 'w')  # write mode ('a+' to append to an existing db)

    allIDs = []
    for collection in ['train','val']:
        for label, cate in enumerate(categories):
            _, rcobjs = get_anno(cate, collection=collection, filter='all', img_scale='Org', withCoarseVp=True)
            imgIDs = get_imgIDs(rcobjs)

            print ('%15s  %s   %5d' % (cate, collection, len(imgIDs)))
            for i, imgID in enumerate(tqdm(imgIDs)):
                # if is_py3:
                #     imgID = imgID.decode('UTF-8')

                if imgID[0]=='n':
                    fo = '%s_imagenet' % cate
                else:
                    fo = '%s_pascal'   % cate
                image_file = os.path.join(src_img_dir, fo, '%s.%s' % (imgID, fn_pattern.strip('.')))
                assert os.path.exists(image_file), image_file

                img = cv2.imread(image_file) # , cv2.IMREAD_UNCHANGED)
                imdb[imgID] = img
                allIDs.append(imgID)

    imdb.close()
    print ('All Images: %d' % len(allIDs))
    print ('All Images: %d  (unique)' % len(set(allIDs)))

    return db_path
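A quick read-back check of the database built above can be useful. The sketch below assumes the read-side ImageData_lmdb API used elsewhere in this listing (.keys, key indexing, .close()); verify_db is a hypothetical helper name, not part of the original code.

def verify_db(db_path):
    # Open in the default (read) mode and spot-check a few entries.
    imdb = ImageData_lmdb(db_path)
    print('Nr. Images: ', len(imdb.keys))
    for k in list(imdb.keys)[:5]:
        img = imdb[k]
        assert img is not None and img.ndim == 3, k
    imdb.close()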
Example #2
def build_db():

    try: os.makedirs(db_path)
    except OSError: print('Make dirs skipped!')


    # NpyData_lmdb
    imdb = ImageData_lmdb(db_path, 'w')

    cate2imgIDs = {}
    # from multiprocessing import Pool
    for cate, synsetID in cate_synsetID:
        cate_dir = os.path.join(src_img_dir, synsetID)
        shapeIDs = [x for x in os.listdir(cate_dir) if os.path.isdir(os.path.join(cate_dir,x))]
        print(cate)
        for _k, shapeID in enumerate(tqdm(shapeIDs)):
            sys.stdout.flush()
            cate_shape_dir = os.path.join(cate_dir, shapeID)
            imgIDs = [x[:-4] for x in os.listdir(cate_shape_dir) if x.endswith('.png')]
            for imgID in imgIDs:
                imgpath = os.path.join(cate_shape_dir,imgID+'.png')
                cate2imgIDs.setdefault(cate, []).append(imgID)
                # write to lmdb.
                #-# img = cv2.imread(imgpath) # , cv2.IMREAD_UNCHANGED)
                #-# imdb[imgID] = img
                imdb.put(imgID, imgpath)

        Open(os.path.join(base_dir,'cate2imgIDs/%s.txt'%cate), 'w').write('\n'.join(cate2imgIDs[cate])+'\n')
        # exit()

    pickle.dump(cate2imgIDs, Open(os.path.join(base_dir,'cate2imgIDs.pkl'), 'wb'), protocol)
    return db_path
Example #3
    def __init__(self,
                 collection,
                 cates,
                 rsz_shape,
                 with_aug=False,
                 context_pad=16,
                 img_scale='Org',
                 mode='torchmodel'):
        super().__init__(collection, rsz_shape, mode=mode)  # python3 only

        db_path = pascal3d_imdb_path  # _get_local_db_path(_db_path)
        assert db_path is not None, '%s does not exist.' % db_path
        self.datadb = ImageData_lmdb(db_path)
        #
        self.with_aug = with_aug

        if self.with_aug:
            # Pre-computed augmentation box
            raise NotImplementedError  #@Shuai: (View Estimation on GT doesn't use box augmentation).
        else:  # use gt box
            # Pre-computed (verified) clamped gt box
            gtbox_path = pascal3d_gt_box_path.format(collection=collection)
            self.objId2gtbox = pickle.load(open(gtbox_path, 'rb'))

        self.context_scale = float(
            rsz_shape[0]) / (rsz_shape[0] - 2 * context_pad)
Example #4
    def __init__(self, collection, cates, rsz_shape, mode='torchmodel'):
        super().__init__(collection, rsz_shape, mode=mode)  # python3 only
        # LMDB of the original images in JPEG format (rescaled to a max side of 500).
        db_path = syn_imdb_path  # _get_local_db_path(syn_db_path)
        assert db_path is not None, '%s does not exist.' % syn_imdb_path

        self.datadb = ImageData_lmdb(db_path, 'r')
Example #5
    def __init__(self, collection='train', net_arch='alexnet', sampling=1.0):
        self.net_arch = net_arch
        self.cfg = netcfg[net_arch]
        self.collection = collection
        self.cates = cate10
        #
        self.cate2ind = odict(zip(self.cates, range(len(self.cates))))
        # get im_db
        self.db_path = os.path.join(base_dir, 'ModelNet10-SO3',
                                    self.collection2dbname[collection])
        assert self.db_path is not None, '%s does not exist.' % self.db_path
        self.datadb = ImageData_lmdb(self.db_path)
        # Get anno
        self.keys, self.recs = get_anno(self.db_path)
        assert 0 < sampling <= 1.0, sampling
        if sampling < 1.0:
            print('Sampling dataset: %s' % sampling)
            _inds = np.arange(len(self.keys))
            sample_inds = np.random.choice(_inds,
                                           size=int(len(_inds) * sampling),
                                           replace=False)
            sample_inds.sort()
            self.keys = [self.keys[x] for x in sample_inds]
            self.recs = self.recs[sample_inds]
        self.key2ind = dict(zip(self.keys, range(len(self.keys))))
        # self.resize_shape = rsz_shape
        self.mean_pxl = np.array([102.9801, 115.9465, 122.7717], np.float32)
Example #6
def test(visualize=True):

    datadb = ImageData_lmdb(db_path)
    print (datadb.keys)
    for k in datadb.keys:
        img = datadb[k]

        if visualize:
            cv2.imshow('real_img', img)
            cv2_wait()
Example #7
def eval_all(pred_dbpath, test_ids=None, use_multiprocess=False):
    from lmdb_util import NpyData_lmdb, ImageData_lmdb
    imdb_gtNorm = ImageData_lmdb(data_dir + 'NormCamera.Rawpng.lmdb',
                                 always_load_color=False)
    imdb_gtMask = ImageData_lmdb(data_dir + 'Valid.Rawpng.lmdb',
                                 always_load_color=False)
    imdb_prNorm = ImageData_lmdb(pred_dbpath, always_load_color=False)
    #
    if test_ids is None:
        test_ids = [
            x.strip().split('/')[1]
            for x in open(data_dir + 'testNdxs.txt').readlines()
        ]

    # use multicores
    if use_multiprocess:
        print("Warning: TODO, haven't solve lmdb with multiprocessing issue.")
        raise NotImplementedError
        from multiprocessing import Pool, cpu_count
        from functools import partial
        from itertools import izip, repeat as Rp
        p = Pool(cpu_count())
        Es = p.map(
            process_one,
            izip(test_ids, Rp(imdb_gtNorm), Rp(imdb_gtMask), Rp(imdb_prNorm)))
    else:
        Es = []
        for i, test_img_id in enumerate(test_ids):
            E = process_one(
                (test_img_id, imdb_gtNorm, imdb_gtMask, imdb_prNorm))
            Es.append(E)
            print('\r %s / %s  ' % (i, len(test_ids)), end='', flush=True)

    Es = np.concatenate(Es)
    #
    mean = np.mean(Es)
    median = np.median(Es)
    rmse = np.sqrt(np.mean(np.power(Es, 2)))
    acc11 = np.mean(Es < 11.25) * 100
    acc22 = np.mean(Es < 22.5) * 100
    acc30 = np.mean(Es < 30) * 100
    return mean, median, rmse, acc11, acc22, acc30
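The six summary statistics above are recomputed verbatim inside the test() function later in this listing; a small helper could factor them out. A sketch of that refactor (norm_error_stats is a hypothetical name, not part of the original code):

def norm_error_stats(Es):
    # Summarize per-pixel angular errors (in degrees): mean, median, RMSE,
    # and the fraction below 11.25 / 22.5 / 30 degrees, as percentages.
    Es = np.asarray(Es)
    mean = np.mean(Es)
    median = np.median(Es)
    rmse = np.sqrt(np.mean(np.power(Es, 2)))
    acc11 = np.mean(Es < 11.25) * 100
    acc22 = np.mean(Es < 22.5) * 100
    acc30 = np.mean(Es < 30) * 100
    return mean, median, rmse, acc11, acc22, acc30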
def read_image_size(db_path):

    imdb = ImageData_lmdb(db_path)
    print ("Nr. Images: ", len(imdb.keys), imdb.len)

    imgID2size = {}
    for img_id in imdb.keys:
        h, w, c = imdb[img_id].shape
        assert c==3, img_id
        imgID2size[img_id] = (h, w)
    pickle.dump(imgID2size, open(os.path.join(db_path,'imgID2size.pkl'), 'wb'), protocol)
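The size index dumped above can be read back with pickle. A minimal loader sketch, assuming os and pickle are imported as in the rest of the module (load_image_sizes is a hypothetical name):

def load_image_sizes(db_path):
    # Load the {imgID: (height, width)} index written by read_image_size().
    with open(os.path.join(db_path, 'imgID2size.pkl'), 'rb') as f:
        return pickle.load(f)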
    def __init__(self,
                 collection='train',
                 net_arch='vgg16',
                 style='pytorch',
                 with_flip=False,
                 sampling=dict(nyu=1.0, syn=0.0),
                 Ladicky_normal=False):  #
        self.net_arch = net_arch
        self.cfg = netcfg[net_arch]
        self.collection = collection
        self.Ladicky_normal = Ladicky_normal
        #
        if collection == 'test':
            assert not with_flip, 'Test collection should not use flip.'
            assert sampling['syn'] == 0
        self.with_flip = with_flip
        #
        db_path = base_dir + '/SurfaceNormal/nyu_v2/{}.lmdb'
        id_path = base_dir + '/SurfaceNormal/nyu_v2/{}Ndxs.txt'
        self.nyu_dbImage = ImageData_lmdb(
            db_path.format('ImageData.Rawpng'),
            always_load_color=False)  # cv2.IMREAD_UNCHANGED
        if Ladicky_normal:
            print('Using GT normals of NYU v2 from Ladicky et al. and the ALL-valid mask')
            self.nyu_dbNorm = ImageData_lmdb(
                db_path.format('NormCamera_Ladicky.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
            self.nyu_dbValid = ImageData_lmdb(
                db_path.format('Valid_ALL.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
        else:
            self.nyu_dbNorm = ImageData_lmdb(
                db_path.format('NormCamera.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
            self.nyu_dbValid = ImageData_lmdb(
                db_path.format('Valid.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
        if sampling['syn'] > 0:
            syn_db_path = base_dir + '/SurfaceNormal/pbrs/{}.lmdb'
            syn_id_path = base_dir + '/SurfaceNormal/pbrs/data_goodlist_v2.txt'
            self.syn_dbImage = ImageData_lmdb(
                syn_db_path.format('ImageData.Rawjpg'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
            self.syn_dbNorm = ImageData_lmdb(
                syn_db_path.format('NormCamera.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
            self.syn_dbValid = ImageData_lmdb(
                syn_db_path.format('Valid.Rawpng'),
                always_load_color=False)  # cv2.IMREAD_UNCHANGED
            syn_keys = list(map(str.strip, open(syn_id_path).readlines()))
            if sampling['syn'] < 1:
                syn_keys = sample_keys(syn_keys, sampling['syn'])
        else:
            syn_keys = []
        #
        nyu_keys = list(
            map(lambda x: x.strip().split('/')[1],
                open(id_path.format(
                    collection)).readlines()))  # testNdxs.txt   trainNdxs.txt
        if sampling['nyu'] < 1.0:
            print('Sampling dataset: %s' % sampling)
            nyu_keys = sample_keys(nyu_keys, sampling['nyu'])

        #
        self.keys = nyu_keys + syn_keys  # Note: NYU keys always go first
        self.key2ind = dict(zip(self.keys, range(len(self.keys))))
        self._nyu_idx_range = len(nyu_keys)
        #
        self.style = style  # 'pytorch', 'caffe'
        self.transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ToTensor(),
            # Converts an np array (H x W x C) in the range [0, 255]
            # to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
            transforms.Normalize(
                mean=[0.86067367, 0.86067367, 0.86067367],
                std=[0.22848366, 0.22848366, 0.22848366],
            )
        ])
        #
        print('---------------- Dataset_Base -------------------')
        print('         collection : %s' % collection)
        print('         len(keys)  : %s' % len(self.keys))
        print('           net_arch : %s' % net_arch)
        print('          with_flip : %s' % with_flip)
        print('       sampling NYU : %5s%%' % (sampling['nyu'] * 100))
        print('       sampling Syn : %5s%%' % (sampling['syn'] * 100))
        print('--------------------------------------------------')
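The sample_keys helper used above is not shown in this listing. A minimal sketch, assuming it keeps a random fraction of the keys without replacement while preserving their order (mirroring the np.random.choice pattern in Example #5); the implementation details are an assumption:

def sample_keys(keys, ratio):
    # Hypothetical reconstruction: keep ratio * len(keys) randomly chosen keys,
    # sampled without replacement, original order preserved.
    assert 0 < ratio <= 1.0, ratio
    inds = np.random.choice(len(keys), size=int(len(keys) * ratio), replace=False)
    inds.sort()
    return [keys[i] for i in inds]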
def test(dataset_test,
         work_dir,
         test_model=None,
         marker='epoch',
         save_pred=False,
         train_epoch=None):
    out_rslt_path = work_dir + '/temp.out_rslt_path.txt'
    out_eval_path = work_dir + '/temp.out_eval_path.txt'

    pretrained_model = None  # only set when weights are loaded from work_dir below
    if test_model is None:
        test_model = model
        #---- Load trained weights here.------
        assert os.path.exists(work_dir)
        #
        iter_nums, net_name = list_models(work_dir, marker=marker)
        saved_iter_num = iter_nums[-1]
        pretrained_model = work_dir + '/%s_%s_%s.pth.tar' % (
            net_name, marker, saved_iter_num)  # select the maximum iter number.
        print('[pretrained_model] ', pretrained_model)

        checkpoint = torch.load(pretrained_model)  # load weights here.
        _state_dict = patch_saved_DataParallel_state_dict(
            checkpoint['state_dict'])
        test_model.load_state_dict(_state_dict)

    # switch to eval mode
    test_model.eval()
    gEval_redu = reducer_group(*['Es'])

    pre_time = time.time()
    it = -1
    epoch = -1
    #
    keys = dataset_test.keys
    test_loader = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=_cfg.TEST.BATCH_SIZE * nr_GPUs,
        shuffle=False,
        num_workers=opt.num_workers * nr_GPUs,
        pin_memory=opt.pin_memory,
        sampler=None)

    if save_pred:
        from lmdb_util import NpyData_lmdb, ImageData_lmdb
        pred_db_path = os.path.join(work_dir, 'PredNormal.Rawpng.lmdb')
        pred_db = ImageData_lmdb(pred_db_path, 'w')

    for _i_, sample_batched in enumerate(test_loader):
        #
        rec_inds = sample_batched['idx'].numpy()
        #
        it += 1

        # Note: Tensor.cuda()   Returns a copy of this object in CUDA memory.
        data = sample_batched['data'].to(device, non_blocking=True)
        # formulate GT dict
        _gt_targets = model.gt_targets if hasattr(
            model, 'gt_targets') else model.targets
        GT = edict()
        GT['mask'] = sample_batched['mask'].to(device, non_blocking=True)
        for tgt in _gt_targets:
            GT[tgt] = sample_batched[tgt].to(device, non_blocking=True)

        # compute Pred output
        Prob = test_model(data)

        # compute Loss for each target and formulate Loss dictionary.
        Loss, _Metric_ = test_model.compute_loss(Prob, GT)

        total_loss = 0
        for tgt in watch_targets:
            total_loss += Loss[tgt]  # * loss_weight
        if 'norm' in _Metric_.keys():
            _Es = _Metric_['norm'].data.cpu().numpy().copy()
            gEval_redu.collect(dict(Es=_Es), squeeze=False)

        # predict as images
        if save_pred:
            Pred = test_model.compute_pred(Prob, encode_bit=8)  #
            predNormImgs = Pred.norm  #  NxHxWx3
            assert len(rec_inds) == len(predNormImgs)
            for i, idx in enumerate(rec_inds):
                key = keys[idx]
                pred_db[key] = predNormImgs[i]

        # print loss info
        cur_time = time.time()
        time_consume = cur_time - pre_time
        pre_time = cur_time
        print('\r %s [test-iter] %5d / %5d ---------[time_consume] %.2f' %
              (strftime("%Y-%m-%d %H:%M:%S",
                        gmtime()), it, len(test_loader), time_consume))
        for trgt in watch_targets:
            _loss = Loss[trgt].data.cpu().numpy().copy()
            print('  %-15s  loss=%.3f' % (trgt, _loss))
            if np.isnan(_loss):
                print("[Warning]  Weights explode!  Stop training ... ")
                exit(-1)

        _watch_targets = (watch_targets + ['norm']
                          if 'norm' not in watch_targets else watch_targets)
        for tgt in _watch_targets:
            if tgt == 'sgc_norm':  # in metric.
                logprint('   %-10s  [Acc] : %5.1f%%' %
                         (tgt, _Metric_['sgc_norm_acc'] * 100))
            else:
                if tgt in _Metric_.keys():
                    Es = _Metric_[tgt].data.cpu().numpy().copy()
                    mean = np.mean(Es)
                    median = np.median(Es)
                    rmse = np.sqrt(np.mean(np.power(Es, 2)))
                    acc11 = np.mean(Es < 11.25) * 100
                    acc22 = np.mean(Es < 22.5) * 100
                    acc30 = np.mean(Es < 30) * 100
                    logprint(
                        '   %-10s  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%'
                        % (tgt, mean, median, rmse, acc11, acc22, acc30))

        print(
            "\r[work_dir] %s \r" %
            os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir)) + 1:],
            end='',
            flush=True)

    Es = gEval_redu.reduce()['Es']
    mean = np.mean(Es)
    median = np.median(Es)
    rmse = np.sqrt(np.mean(np.power(Es, 2)))
    acc11 = np.mean(Es < 11.25) * 100
    acc22 = np.mean(Es < 22.5) * 100
    acc30 = np.mean(Es < 30) * 100
    summary_str = ''
    if train_epoch is not None:
        summary_str += '[Test at epoch %d]\n' % train_epoch
    summary_str += '  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%\n' % (
        mean, median, rmse, acc11, acc22, acc30)
    logprint(summary_str)
    Open(out_eval_path, 'a+').write(summary_str)

    if save_pred:
        mean, median, rmse, acc11, acc22, acc30 = eval_all(
            pred_db_path, use_multiprocess=False)
        summary_str = '\n--------------------------------------------'
        summary_str += '[Test] %s \n' % pretrained_model
        summary_str += '  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%\n' % (
            mean, median, rmse, acc11, acc22, acc30)
        Open(out_eval_path, 'a+').write(summary_str)
        print(summary_str)

    return mean, median, rmse, acc11, acc22, acc30