Example #1
def build_db():

    try:
        os.makedirs(db_path)
    except OSError:
        print('Make dirs skipped!')


    # ImageData_lmdb: open the image LMDB at db_path for writing.
    imdb = ImageData_lmdb(db_path, 'w')

    cate2imgIDs = {}
    # from multiprocessing import Pool
    for cate, synsetID in cate_synsetID:
        cate_dir = os.path.join(src_img_dir, synsetID)
        shapeIDs = [x for x in os.listdir(cate_dir) if os.path.isdir(os.path.join(cate_dir,x))]
        print(cate)
        for _k, shapeID in enumerate(tqdm(shapeIDs)):
            sys.stdout.flush()
            cate_shape_dir = os.path.join(cate_dir, shapeID)
            imgIDs = [x[:-4] for x in os.listdir(cate_shape_dir) if x.endswith('.png')]
            for imgID in imgIDs:
                imgpath = os.path.join(cate_shape_dir,imgID+'.png')
                cate2imgIDs.setdefault(cate, []).append(imgID)
                # write to lmdb.
                #-# img = cv2.imread(imgpath) # , cv2.IMREAD_UNCHANGED)
                #-# imdb[imgID] = img
                imdb.put(imgID, imgpath)

        Open(os.path.join(base_dir,'cate2imgIDs/%s.txt'%cate), 'w').write('\n'.join(cate2imgIDs[cate])+'\n')
        # exit()

    pickle.dump(cate2imgIDs, Open(os.path.join(base_dir,'cate2imgIDs.pkl'), 'wb'), protocol)
    return db_path
def main(
        collection='train',
        filter='all',
        cates=categories,  # cates=['aeroplane','boat','car'],  #
):

    out_dir = '../anno_db_v2/data.cache/objId2gtbox'

    try:
        os.makedirs(out_dir)
    except OSError:
        print('Make dirs skipped!')

    # from multiprocessing import Pool
    objId2gtbox = dict()
    nr_box = 0
    for cate in cates:
        print(' >>> %10s %5s  %20s    ' % (collection, filter, cate))
        objIDs, rcobjs = get_anno(cate, collection=collection, filter=filter)

        for _k, rcobj in enumerate(rcobjs):  # tqdm()
            gt_box = process(rcobj)  # resize_shape cate, _k, len(rcobjs)
            objId2gtbox[rcobj.obj_id] = gt_box
            nr_box += 1

    outpath = os.path.join(
        out_dir, 'cate%s_%s.%s.pkl' % (len(cates), collection, filter))
    pickle.dump(objId2gtbox, Open(outpath, 'wb'), protocol)
    print('[outpath]: ', outpath)
    print('nr_box:  ', nr_box)
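
Both functions above write through a project-specific `Open` helper rather than the built-in `open`. Its implementation is not part of this example; a minimal sketch consistent with how it is used here (writing into cache directories that may not exist yet) could look like the following, where the directory-creation behaviour is an assumption:

import os

def Open(path, mode='r'):
    # Hypothetical stand-in for the project's Open helper (assumption):
    # behave like open(), but create the parent directory first so that
    # writes into fresh cache folders do not fail.
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    return open(path, mode)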
Example #3
def test(dataset_test, work_dir, test_model=None, marker='epoch'):
    out_rslt_path = work_dir + '/temp.out_rslt_path.txt'
    out_eval_path = work_dir + '/temp.out_eval_path.txt'

    if test_model is None:
        test_model = model
        #---- Load trained weights here.------
        assert os.path.exists(work_dir)
        #
        iter_nums, net_name = list_models(work_dir, marker=marker)
        saved_iter_num = iter_nums[-1]
        pretrained_model = work_dir + '/%s_%s_%s.pth.tar' % (
            net_name, marker, saved_iter_num)  # select the maximum iter number.
        print('[pretrained_model] ', pretrained_model)

        checkpoint = torch.load(pretrained_model)  # load weights here.
        _state_dict = patch_saved_DataParallel_state_dict(
            checkpoint['state_dict'])
        test_model.load_state_dict(_state_dict)

    # switch to evaluation mode
    test_model.eval()
    gLoss_redu = reducer_group(*watch_targets)
    gPred_redu = reducer_group(*['quat'])

    pre_time = time.time()
    it = -1
    epoch = -1
    #
    keys = dataset_test.keys
    test_loader = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=_cfg.TEST.BATCH_SIZE * nr_GPUs,
        shuffle=False,
        num_workers=opt.num_workers * nr_GPUs,
        pin_memory=opt.pin_memory,
        sampler=None)

    with torch.no_grad():
        pbar = tqdm(test_loader)

        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s  " % _short_work_dir)
            it += 1

            # Note: Tensor.cuda()   Returns a copy of this object in CUDA memory.
            label = torch.autograd.Variable(
                sample_batched['label'].cuda(non_blocking=True))
            data = torch.autograd.Variable(
                sample_batched['data'].cuda(non_blocking=True))
            # formulate GT dict
            _gt_targets = test_model.gt_targets if hasattr(
                test_model, 'gt_targets') else test_model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = torch.autograd.Variable(
                    sample_batched[tgt].cuda(non_blocking=True))

            # compute Pred output
            Prob = test_model(data, label)

            # compute Loss for each target and formulate Loss dictionary.
            Loss = test_model.compute_loss(Prob, GT)

            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]

            # predict target angles value
            Pred = test_model.compute_pred(Prob)

            gLoss_redu.collect(
                Loss)  # pass in dict of all loss (loss_a, loss_e, loss_t).
            gPred_redu.collect(Pred, squeeze=False)

            # print loss info
            cur_time = time.time()
            time_consume = cur_time - pre_time
            pre_time = cur_time
            print('\r %s [test-iter] %5d / %5d ---------[time_consume] %.2f' %
                  (strftime("%Y-%m-%d %H:%M:%S",
                            gmtime()), it, len(test_loader), time_consume))
            for trgt in watch_targets:
                _loss = Loss[trgt].data.cpu().numpy().copy()
                print('  %-15s  loss=%.3f' % (trgt, _loss))
                if np.isnan(_loss):
                    print("[Warning]  Weights explode!  Stop training ... ")
                    exit(-1)

            # pbar.set_description("[work_dir] %s  " % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:])
            # print ("\r[work_dir] %s \r" % os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir))+1:],end='')
            sys.stdout.flush()

    pred_quats = gPred_redu.reduce()['quat']

    #-- Write result to file  (Format: # {obj_id}  {a} {b} {c} {d} )
    txtTbl = TxtTable(
        '{obj_id:<20s}   {a:>6.4f}  {b:>6.4f}  {c:>6.4f}  {d:>6.4f}')
    rslt_lines = [txtTbl.getHeader()]
    for _k, _quat in zip(keys, pred_quats):
        _a, _b, _c, _d = _quat
        rslt_line = txtTbl.format(_k, _a, _b, _c, _d)
        rslt_lines.append(rslt_line)
    rslt_lines = '\n'.join(rslt_lines)
    Open(out_rslt_path, 'w').write(rslt_lines)
    #
    print('[out_rslt_path]', out_rslt_path)

    #-- Do evaluation  ('MedError', 'Acc@theta')
    from numpy_db import npy_table
    rc_tbl = npy_table(dataset_test.recs)
    #
    summary_str = eval_cates(
        out_rslt_path,
        rc_tbl,
        cates=opt.cates,
        theta_levels_str='pi/6  pi/12  pi/24')  # ['aeroplane','boat','car'])
    Open(out_eval_path, 'w').write(summary_str)
    print(summary_str)

    reca = TxtTable().load_as_recarr(
        out_eval_path,
        fields=['MedError', 'Acc@pi/6', 'Acc@pi/12', 'Acc@pi/24'])

    return reca[-1]
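
The loop above funnels per-batch results through `reducer_group` from `pytorch_util.libtrain.reducer` and concatenates them at the end via `reduce()`. That class is not shown in this example; the following is only a minimal sketch of the collect/reduce contract the loop assumes, not the actual implementation:

import numpy as np

class ReducerGroupSketch:
    # Hypothetical minimal version of reducer_group (assumption, not the
    # pytorch_util implementation).
    def __init__(self, *names):
        self.buffers = {name: [] for name in names}

    def collect(self, batch_dict, squeeze=True):
        # batch_dict maps each watched name to a per-batch tensor or array.
        for name in self.buffers:
            value = batch_dict[name]
            if hasattr(value, 'detach'):  # torch.Tensor -> numpy
                value = value.detach().cpu().numpy()
            value = np.atleast_1d(np.asarray(value))
            if squeeze:
                value = np.atleast_1d(np.squeeze(value))
            self.buffers[name].append(value)

    def reduce(self):
        # Concatenate all collected batches along the first axis.
        return {name: np.concatenate(vals, axis=0)
                for name, vals in self.buffers.items()}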
Example #4
work_dir += '.%s' % opt.train_view
_short_work_dir = os.path.abspath(
    work_dir)[len(os.path.abspath(opt.base_dir)) + 1:]

# global state variables.
start_it = 0
start_epoch = 0
from pytorch_util.libtrain import rm_models, list_models
from pytorch_util.libtrain.reducer import reducer, reducer_group

# Log file.
script_name, _ = os.path.splitext(os.path.basename(__file__))
log_filename = '%s/%s.log' % (work_dir, script_name)
if os.path.exists(log_filename):  # backup previous content.
    pre_log_content = open(log_filename).read()
logf = Open(log_filename, 'w')


def logprint(s):
    print("\r%s                            " % s)
    logf.write(s + "\n")


#-- Resume or use a pretrained model (note: not ImageNet pretraining).
assert not (opt.resume
            and opt.pretrain is not None), 'Only one of resume / pretrain may be set.'
if opt.resume:
    iter_nums, net_name = list_models(work_dir)  # ('snapshots')
    assert len(iter_nums) > 0, "No models available"
    latest_model_name = os.path.join(
        work_dir, '%s_iter_%s.pth.tar' % (net_name, iter_nums[-1]))
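    # NOTE: the resume branch is truncated in this excerpt. A hedged sketch of
    # the continuation, mirroring the checkpoint-loading pattern used in
    # test() below (the 'epoch'/'it' checkpoint keys are assumptions):
    print('[resume from] ', latest_model_name)
    checkpoint = torch.load(latest_model_name)
    model.load_state_dict(
        patch_saved_DataParallel_state_dict(checkpoint['state_dict']))
    start_epoch = checkpoint.get('epoch', start_epoch)
    start_it = checkpoint.get('it', start_it)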
def test(dataset_test,
         work_dir,
         test_model=None,
         marker='epoch',
         save_pred=False,
         train_epoch=None):
    out_rslt_path = work_dir + '/temp.out_rslt_path.txt'
    out_eval_path = work_dir + '/temp.out_eval_path.txt'

    if test_model is None:
        test_model = model
        #---- Load trained weights here.------
        assert os.path.exists(work_dir)
        #
        iter_nums, net_name = list_models(work_dir, marker=marker)
        saved_iter_num = iter_nums[-1]
        pretrained_model = work_dir + '/%s_%s_%s.pth.tar' % (
            net_name, marker, saved_iter_num)  # select the maximum iter number.
        print('[pretrained_model] ', pretrained_model)

        checkpoint = torch.load(pretrained_model)  # load weights here.
        _state_dict = patch_saved_DataParallel_state_dict(
            checkpoint['state_dict'])
        test_model.load_state_dict(_state_dict)

    # switch to evaluation mode
    test_model.eval()
    gEval_redu = reducer_group(*['Es'])

    pre_time = time.time()
    it = -1
    epoch = -1
    #
    keys = dataset_test.keys
    test_loader = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=_cfg.TEST.BATCH_SIZE * nr_GPUs,
        shuffle=False,
        num_workers=opt.num_workers * nr_GPUs,
        pin_memory=opt.pin_memory,
        sampler=None)

    if save_pred:
        from lmdb_util import NpyData_lmdb, ImageData_lmdb
        pred_db_path = os.path.join(work_dir, 'PredNormal.Rawpng.lmdb')
        pred_db = ImageData_lmdb(pred_db_path, 'w')

    for _i_, sample_batched in enumerate(test_loader):
        #
        rec_inds = sample_batched['idx'].numpy()
        #
        it += 1

        # Note: Tensor.cuda()   Returns a copy of this object in CUDA memory.
        data = sample_batched['data'].to(device, non_blocking=True)
        # formulate GT dict
        _gt_targets = test_model.gt_targets if hasattr(
            test_model, 'gt_targets') else test_model.targets
        GT = edict()
        GT['mask'] = sample_batched['mask'].to(device, non_blocking=True)
        for tgt in _gt_targets:
            GT[tgt] = sample_batched[tgt].to(device, non_blocking=True)

        # compute Pred output
        Prob = test_model(data)

        # compute Loss for each target and formulate Loss dictionary.
        Loss, _Metric_ = test_model.compute_loss(Prob, GT)

        total_loss = 0
        for tgt in watch_targets:
            total_loss += Loss[tgt]  # * loss_weight
        if 'norm' in _Metric_.keys():
            _Es = _Metric_['norm'].data.cpu().numpy().copy()
            gEval_redu.collect(dict(Es=_Es), squeeze=False)

        # predict as images
        if save_pred:
            Pred = test_model.compute_pred(Prob, encode_bit=8)  #
            predNormImgs = Pred.norm  #  NxHxWx3
            assert len(rec_inds) == len(predNormImgs)
            for i, idx in enumerate(rec_inds):
                key = keys[idx]
                pred_db[key] = predNormImgs[i]

        # print loss info
        cur_time = time.time()
        time_consume = cur_time - pre_time
        pre_time = cur_time
        print('\r %s [test-iter] %5d / %5d ---------[time_consume] %.2f' %
              (strftime("%Y-%m-%d %H:%M:%S",
                        gmtime()), it, len(test_loader), time_consume))
        for trgt in watch_targets:
            _loss = Loss[trgt].data.cpu().numpy().copy()
            print('  %-15s  loss=%.3f' % (trgt, _loss))
            if np.isnan(_loss):
                print("[Warning]  Weights explode!  Stop training ... ")
                exit(-1)

        _watch_targets = (watch_targets + ['norm']
                          if 'norm' not in watch_targets else watch_targets)
        for tgt in _watch_targets:
            if tgt == 'sgc_norm':  # in metric.
                logprint('   %-10s  [Acc] : %5.1f%%' %
                         (tgt, _Metric_['sgc_norm_acc'] * 100))
            else:
                if tgt in _Metric_.keys():
                    Es = _Metric_[tgt].data.cpu().numpy().copy()
                    mean = np.mean(Es)
                    median = np.median(Es)
                    rmse = np.sqrt(np.mean(np.power(Es, 2)))
                    acc11 = np.mean(Es < 11.25) * 100
                    acc22 = np.mean(Es < 22.5) * 100
                    acc30 = np.mean(Es < 30) * 100
                    logprint(
                        '   %-10s  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%'
                        % (tgt, mean, median, rmse, acc11, acc22, acc30))

        print(
            "\r[work_dir] %s \r" %
            os.path.abspath(work_dir)[len(os.path.abspath(opt.base_dir)) + 1:],
            end='',
            flush=True)

    Es = gEval_redu.reduce()['Es']
    mean = np.mean(Es)
    median = np.median(Es)
    rmse = np.sqrt(np.mean(np.power(Es, 2)))
    acc11 = np.mean(Es < 11.25) * 100
    acc22 = np.mean(Es < 22.5) * 100
    acc30 = np.mean(Es < 30) * 100
    summary_str = ''
    if train_epoch is not None:
        summary_str += '[Test at epoch %d]\n' % train_epoch
    summary_str += '  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%\n' % (
        mean, median, rmse, acc11, acc22, acc30)
    logprint(summary_str)
    Open(out_eval_path, 'a+').write(summary_str)

    if save_pred:
        mean, median, rmse, acc11, acc22, acc30 = eval_all(
            pred_db_path, use_multiprocess=False)
        summary_str = '\n--------------------------------------------\n'
        summary_str += '[Test] %s \n' % pretrained_model
        summary_str += '  [mean]: %5.1f   [median]: %5.1f   [rmse]: %5.1f   [acc{11,22,30}]: %5.1f%%, %5.1f%%, %5.1f%%\n' % (
            mean, median, rmse, acc11, acc22, acc30)
        Open(out_eval_path, 'a+').write(summary_str)
        print(summary_str)

    return mean, median, rmse, acc11, acc22, acc30
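
A hedged sketch of a call site for this test() entry point; the save_pred flag and the epoch value below are placeholders, not values taken from the original script:

# Hypothetical call site (placeholder arguments).
mean, median, rmse, acc11, acc22, acc30 = test(
    dataset_test, work_dir, save_pred=True, train_epoch=30)
logprint('final  mean=%.1f  median=%.1f  rmse=%.1f  acc@11.25=%.1f%%'
         % (mean, median, rmse, acc11))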
def filter_img_set(img_set,
                   filter='easy',
                   source='pascal',
                   include_coarse_vp_anno=False,
                   MAX_IMAGE_SIDE=None):
    print('=================[%s_%s] %s' % (source, img_set, filter))
    # {'flip':1, 'aug_n':1,'jitter_IoU':1,'difficult':1,'truncated':1,'occluded':1}
    if filter == 'all':
        opts = None
    elif filter == 'easy':  # In opts, 1 marks objects to keep; 0 marks objects to filter out.
        opts = edict({
            'difficult': 0,
            'truncated': 0,
            'occluded': 0
        })  # filter out difficult, truncated and occluded
    elif filter == 'nonOccl':
        opts = edict({
            'difficult': 1,
            'truncated': 0,
            'occluded': 0
        })  # filter out truncated, occluded but keep difficult
    elif filter == 'nonDiff':
        opts = edict({
            'difficult': 0,
            'truncated': 1,
            'occluded': 1
        })  # filter out difficult but keep truncated, occluded
    else:
        raise NotImplementedError

    cnt_image = 0
    for cate in categories:
        obj_list = []

        if source == 'pascal':
            filename = PascalVOC_root + '/VOCdevkit/VOC2012/ImageSets/Main/%s.txt' % img_set
            all_ids = list(map(str.strip, open(filename).readlines()))
        elif source == 'imagenet':
            # e.g. <Pascal3D_root>/Image_sets/aeroplane_imagenet_val.txt
            filename = Pascal3D_root + '/Image_sets/%s_imagenet_%s.txt' % (
                cate, img_set)
            all_ids = list(map(str.strip, open(filename).readlines()))
        else:
            raise NotImplementedError

        cnt_obj = 0
        for i, id in enumerate(all_ids):
            anno_filename = os.path.join(
                py_anno_dir, '%s_%s/%s.pkl' %
                (cate, source, id))  # aeroplane_pascal/xxx.pkl
            if not os.path.exists(anno_filename):
                # print('File does not exist: ', anno_filename)
                continue

            selected_objs = read_anno_pkl(anno_filename, cate, source, id,
                                          img_set, opts,
                                          include_coarse_vp_anno,
                                          MAX_IMAGE_SIDE)
            if len(selected_objs) > 0:
                cnt_obj += len(selected_objs)
                obj_list += selected_objs
            # else:
            #     print anno_filename

        # print "-------------------------------------   len(obj_list): ", len(obj_list)
        obj_rcs = np.vstack(obj_list).reshape(
            (-1, ))  # np.concatenate(obj_list, axis=0) # np.vstack(obj_list)
        obj_rcs = obj_rcs.view(np.recarray)  # cast as np.recarray
        img_ids = set(obj_rcs.src_img.image_id.tolist())
        cnt_image += len(img_ids)
        print('%-20s img %5s   obj  %5s' % (cate, len(img_ids), len(obj_rcs)))

        # print '%-20s img %5s  obj  %5s' % (cate, len(set(obj_rcs.src_img.image_id.tolist())), len(obj_rcs))
        filterStr = filter + '_withCoarseVp' if include_coarse_vp_anno else filter
        maxSideStr = 'Org' if (
            MAX_IMAGE_SIDE is None) else 'Max%s' % MAX_IMAGE_SIDE
        pickle.dump(
            obj_rcs,
            Open(
                'working_dump.cache/Catewise_obj_anno/%s.%s/%s_%s.%s.pkl' %
                (filterStr, maxSideStr, source, img_set, cate), 'wb'),
            protocol)

    print('selected:  %d / %d  images' % (cnt_image, len(all_ids)))
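
A possible driver loop for filter_img_set, sketched under the assumption that all Pascal3D+ sources, splits and filter levels are wanted; the exact combinations used by the original repository are not shown here:

# Hypothetical driver (assumed source/split/filter combinations).
for source, img_set in [('pascal', 'train'), ('pascal', 'val'),
                        ('imagenet', 'train'), ('imagenet', 'val')]:
    for filter_level in ['all', 'easy', 'nonOccl', 'nonDiff']:
        filter_img_set(img_set, filter=filter_level, source=source)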
Example #7
import os
import pickle

from basic.util import load_yaml

conf = load_yaml('config.yml')  # odict
Pascal3D_root = os.path.expanduser(conf['Pascal3D_release_root'])
protocol = conf[
    'pkl_protocol']  # pickle dump protocol. Change -1 to 2 for python2.x compatibility.

mat_anno_dir = os.path.join(Pascal3D_root, 'Annotations')
new_anno_dir = os.path.join('./working_dump.cache', 'Imgwise_Annotations.py')
try:
    os.makedirs(new_anno_dir)
except OSError:
    pass

all_fos = [
    x for x in os.listdir(mat_anno_dir)
    if os.path.isdir(os.path.join(mat_anno_dir, x))
]
for i, fo in enumerate(all_fos):
    print('[%2d/%2d]  %s   ' % (i, len(all_fos), fo))
    for f in [
            x for x in os.listdir(os.path.join(mat_anno_dir, fo))
            if x.endswith('.mat')
    ]:
        matfile = os.path.join(mat_anno_dir, fo, f)
        pklfile = os.path.join(new_anno_dir, fo, f[:-4] + '.pkl')
        struct_dict = loadmat(matfile)
        pickle.dump(struct_dict, Open(pklfile, 'wb'), protocol)
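
A quick way to sanity-check the conversion is to read one of the dumped pickles back; the category folder and image id below are placeholders, not files guaranteed to exist:

# Hypothetical spot check (placeholder folder and image id).
with open(os.path.join(new_anno_dir, 'aeroplane_pascal', '2008_000021.pkl'), 'rb') as f:
    anno = pickle.load(f)
print(sorted(anno.keys()))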
def test():

    out_rslt_path = os.path.join(opt.work_dir, 'rslt.cache.txt')
    out_eval_path = os.path.join(opt.work_dir, 'eval.cache.txt')
    keys = dataset_test.keys

    # switch to evaluation mode
    model.eval()

    # # loss reducer
    from pytorch_util.libtrain.reducer import reducer, reducer_group
    gLoss_redu = reducer_group(*watch_targets)
    gPred_redu = reducer_group(*['a', 'e', 't'])

    pre_time = time.time()
    it = -1
    epoch = -1

    # for _i_, sample_batched in enumerate(test_loader):

    with torch.no_grad():
        pbar = tqdm(test_loader)
        for _i_, sample_batched in enumerate(pbar):
            pbar.set_description("[work_dir] %s  " %
                                 os.path.relpath(opt.work_dir))

            it += 1

            # prepare input data
            label = sample_batched['label'].to(device, non_blocking=True)
            data = sample_batched['data'].to(device, non_blocking=True)
            # formulate GT dict
            _gt_targets = model.gt_targets if hasattr(
                model, 'gt_targets') else model.targets
            GT = edict()
            for tgt in _gt_targets:
                GT[tgt] = sample_batched[tgt].to(device, non_blocking=True)

            # compute Pred output
            Prob = model(data, label)

            # compute Loss for each target and formulate Loss dictionary.
            Loss = model.compute_loss(Prob, GT)

            total_loss = 0
            for tgt in watch_targets:
                total_loss += Loss[tgt]  # * loss_weight

            # predict target angles value
            Pred = model.compute_pred(Prob)

            gLoss_redu.collect(
                Loss)  # pass in dict of all loss (loss_a, loss_e, loss_t).
            gPred_redu.collect(Pred)

            # per-iteration timing (loss printing omitted in this variant)
            cur_time = time.time()
            time_consume = cur_time - pre_time
            pre_time = cur_time

    name2pred = gPred_redu.reduce()

    # convert prediction back to [0, 360]
    a, e, t = dataset_test.pred2angle(
        name2pred['a'], name2pred['e'],
        name2pred['t'])  # or call _dataset_module.pred2angle

    #-- Write result to file  (Format: # {obj_id}  {a} {e} {t} )
    rslt_lines = '%-40s  %5s  %5s  %5s\n' % ('# obj_id', 'a', 'e', 't')
    rslt_lines += ''.join([
        '%-40s  %5.1f  %5.1f  %5.1f\n' % (_k, _a, _e, _t)
        for _k, _a, _e, _t in zip(keys, a, e, t)
    ])
    Open(out_rslt_path, 'w').write(rslt_lines)
    print("[output] ", out_rslt_path)

    #-- Do evaluation  ('MedError', 'Acc@theta')
    summary_str = eval_cates(
        out_rslt_path, cates=opt.cates,
        theta_levels_str='pi/6  pi/12  pi/24')  # ['aeroplane','boat','car'])
    Open(out_eval_path, 'w').write(summary_str)
    print(summary_str)

    reca = TxtTable().load_as_recarr(
        out_eval_path,
        fields=['MedError', 'Acc@pi/6', 'Acc@pi/12', 'Acc@pi/24'])

    return reca[-1]
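
This test() returns the last record of the evaluation table; a minimal sketch of consuming it, assuming the field names listed above:

# Hypothetical call site (assumes the fields listed above).
med_err, acc_pi6, acc_pi12, acc_pi24 = test()
print('MedError=%.2f  Acc@pi/6=%.3f  Acc@pi/12=%.3f  Acc@pi/24=%.3f'
      % (med_err, acc_pi6, acc_pi12, acc_pi24))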