Example #1
def add_part_indicatormap(data_dir, save_dir, mdim, rate, filter_size, stride):
    """
    This function generates part indicator maps for old data.
    data_dir is the directory containing all the data_batch files.
    """
    allfile = iu.getfilelist(data_dir, 'data_batch_\d+')
    meta_path = iu.fullfile(data_dir, 'batches.meta')
    iu.ensure_dir(save_dir)
    if iu.exists(meta_path, 'file'): 
        d_meta = myio.unpickle(meta_path)
        if 'savedata_info' not in d_meta:
            d_meta['savedata_info'] = dict()
            d_meta['savedata_info']['indmap_para'] = dict()
        d_meta['savedata_info']['indmap_para']['filter_size'] = filter_size
        d_meta['savedata_info']['indmap_para']['stride'] = stride
        d_meta['savedata_info']['indmap_para']['rate'] = rate 
        myio.pickle(iu.fullfile(save_dir, 'batches.meta'), d_meta)        
    for fn in allfile:
        print 'Processing %s' % fn
        d = myio.unpickle(iu.fullfile(data_dir, fn))
        ndata = d['data'].shape[-1]
        nparts = 7
        d['indmap'] = np.zeros((nparts, mdim[0], mdim[1], ndata), dtype=np.bool) 
        for i in range(ndata):
            jts = d['joints8'][...,i]
            d['indmap'][...,i] = HMLPE.create_part_indicatormap(jts, part_idx,  mdim, rate, filter_size, stride)
        myio.pickle(iu.fullfile(save_dir, fn), d)
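
A minimal usage sketch for the function above. The directories are placeholders, and the indicator-map parameters (rate, filter size, stride) are assumptions borrowed from values used elsewhere on this page:

# Hypothetical call: regenerate part indicator maps for existing data batches.
# mdim, rate, filter_size and stride must match the settings used when the
# batches were created; the values below are only illustrative.
add_part_indicatormap(data_dir='/path/to/old_batches',
                      save_dir='/path/to/batches_with_indmap',
                      mdim=(8, 8), rate=0.3, filter_size=30.0, stride=12.0)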
Example #2
def batch_generate_pymeta(data_root_folder, force_to_generate=False):
    """
    This function will convert batches into pymeta style which can be loaded by convnet
    """
    allfolder = iu.getfolderlist(data_root_folder)
    print "Get %d folders" % len(allfolder)
    l = []
    import sys

    for fn in allfolder:
        a = DHMLPE()
        fp = iu.fullfile(data_root_folder, fn, "matlab_meta.mat")
        if iu.exists(fp, "file"):
            save_fp = iu.fullfile(data_root_folder, fn, "batches.meta")
            print "-----------------------------"
            print "Processing ", fp
            if iu.exists(save_fp, "file") and not force_to_generate:
                print "Ha ha, it exists!"
            else:
                meta = a.get_convnet_meta(fp)
                mio.pickle(save_fp, meta)
            print "Saved %s" % save_fp
        else:
            l = l + [fp]
    print "=============\n"
    print "Here is what I cannot find (%d in total)" % len(l)
    print l
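
As a rough usage sketch (the root folder below is a placeholder), every subfolder that contains a matlab_meta.mat gets a batches.meta written next to it:

# Hypothetical root folder; pass force_to_generate=True to rebuild existing metas.
batch_generate_pymeta('/path/to/experiment_root', force_to_generate=False)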
Example #3
def MakeDataFromImages(imgdir, max_per_batch, save_dir=None, save_name=None):
    import iutils as iu
    import iconvnet_datacvt as icvt
    from PIL import Image
    if max_per_batch == 0:
        raise CifarError('max_per_batch cannot be zero')
    allfiles = iu.getfilelist(imgdir, '.*jpg|.*bmp|.*png$')
    ndata = len(allfiles)
    iu.ensure_dir(save_dir)
    d = PrepareData(min(max_per_batch, ndata))
    j = 0
    if save_name is None:
        save_name = 'data_batch'
    bid = 1
    for i, fn in enumerate(allfiles):
        if j == max_per_batch:
            j = 0
            if not save_dir is None:
                icvt.ut.pickle(
                    iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
                bid = bid + 1
            if ndata - i < max_per_batch:
                d = PrepareData(ndata - i)
        fp = iu.fullfile(imgdir, fn)

        img = iu.imgproc.ensure_rgb(np.asarray(Image.open(fp)))
        img = Image.fromarray(img).resize((img_size[0], img_size[1]))
        arr_img = np.asarray(img).reshape((dim_data), order='F')
        d['data'][..., j] = arr_img
        j = j + 1
    if not save_dir is None:
        icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
Example #4
 def generate_positive_data(self, generate_type, allfile=None):
     """
     generate_type = 'rt': random translation
                      'ct': center block
     """
     if allfile is None:
         allfile = iu.getfilelist(self.imgdata_info['imgdatapath'],
                                  '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (
         self.imgdata_info['imgdatapath'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     self.init_meta(generate_type)
     print self.meta
     np.random.seed(7)
     for fn in allfile:
         print 'Processing %s ' % fn
         mpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
         self.generate_positive_data_from_mat(generate_type,
                                              iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
         self.meta['data_mean'] = self.meta['data_mean'].reshape((-1, 1))
     else:
         self.meta['data_mean'] = 0
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'),
                 self.meta)
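
A heavily hedged sketch of driving this method. It assumes an HMLPE-style object whose imgdata_info and savedata_info dictionaries are already configured; the construction and field values below are assumptions, and only the accessed keys follow from the method body:

# Assumed setup (not the class's documented API): fill in the keys the method reads.
# init_meta() likely needs further savedata_info fields (e.g. newdim, indmap_para)
# that are not shown here.
h = HMLPE()
h.imgdata_info = {'imgdatapath': '/path/to/mat_files'}       # placeholder path
h.savedata_info = {'savedir': '/path/to/output_batches',     # placeholder path
                   'start_patch_id': 1}
h.generate_positive_data('rt')   # 'rt' = random translation, 'ct' = center block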
Example #5
def batch_generate_pymeta(data_root_folder, force_to_generate=False):
    """
    This function will convert batches into pymeta style which can be loaded by convnet
    """
    allfolder = iu.getfolderlist(data_root_folder)
    print 'Get %d folders' % len(allfolder)
    l = []
    import sys
    for fn in allfolder:
        a = DHMLPE()
        fp = iu.fullfile(data_root_folder, fn, 'matlab_meta.mat')
        if iu.exists(fp, 'file'):
            save_fp = iu.fullfile(data_root_folder, fn, 'batches.meta')
            print '-----------------------------'
            print 'Processing ', fp
            if iu.exists(save_fp, 'file') and not force_to_generate:
                print 'Ha ha, it exists!'
            else:
                meta = a.get_convnet_meta(fp)
                mio.pickle(save_fp, meta)
            print 'Saved %s' % save_fp
        else:
            l = l + [fp]
    print '=============\n'
    print 'Here is what I cannot find (%d in total)' % len(l)
    print l
Example #6
def MakeDataFromImages(imgdir, max_per_batch , save_dir = None, save_name=None):
    import iutils as iu
    import iconvnet_datacvt as icvt
    from PIL import Image
    if max_per_batch == 0:
        raise CifarError('max_per_batch cannot be zero')
    allfiles = iu.getfilelist(imgdir, '.*jpg|.*bmp|.*png$')
    ndata = len(allfiles)
    iu.ensure_dir(save_dir)
    d = PrepareData(min(max_per_batch, ndata))
    j = 0
    if save_name is None:
        save_name = 'data_batch'
    bid = 1
    for i,fn in enumerate(allfiles):
        if j == max_per_batch:
            j = 0
            if not save_dir is None:
                icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
                bid = bid + 1 
            if ndata - i < max_per_batch:
                d = PrepareData(ndata-i)
        fp = iu.fullfile(imgdir, fn)
        
        img = iu.imgproc.ensure_rgb(np.asarray(Image.open(fp)))
        img = Image.fromarray(img).resize((img_size[0],img_size[1]))
        arr_img = np.asarray(img).reshape((dim_data), order='F')
        d['data'][...,j] = arr_img
        j = j + 1
    if not save_dir is None:
        icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)         
Example #7
def add_part_indicatormap(data_dir, save_dir, mdim, rate, filter_size, stride):
    """
    This function generates part indicator maps for old data.
    data_dir is the directory containing all the data_batch files.
    """
    allfile = iu.getfilelist(data_dir, 'data_batch_\d+')
    meta_path = iu.fullfile(data_dir, 'batches.meta')
    iu.ensure_dir(save_dir)
    if iu.exists(meta_path, 'file'):
        d_meta = myio.unpickle(meta_path)
        if 'savedata_info' not in d_meta:
            d_meta['savedata_info'] = dict()
            d_meta['savedata_info']['indmap_para'] = dict()
        d_meta['savedata_info']['indmap_para']['filter_size'] = filter_size
        d_meta['savedata_info']['indmap_para']['stride'] = stride
        d_meta['savedata_info']['indmap_para']['rate'] = rate
        myio.pickle(iu.fullfile(save_dir, 'batches.meta'), d_meta)
    for fn in allfile:
        print 'Processing %s' % fn
        d = myio.unpickle(iu.fullfile(data_dir, fn))
        ndata = d['data'].shape[-1]
        nparts = 7
        d['indmap'] = np.zeros((nparts, mdim[0], mdim[1], ndata),
                               dtype=np.bool)
        for i in range(ndata):
            jts = d['joints8'][..., i]
            d['indmap'][..., i] = HMLPE.create_part_indicatormap(
                jts, part_idx, mdim, rate, filter_size, stride)
        myio.pickle(iu.fullfile(save_dir, fn), d)
Example #8
def ReadDataToCifarDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print "I haven't implemented the joints8 part"
        #raise ModecError("I haven't implemented the joints8 part")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4, s_first), dtype=np.int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2, 29, s_first), dtype=np.float32)
    tdsize = cifar.img_size[0]  # make sure img_size[0] == img_size[1]

    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata - i)
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][..., j] = tbox
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        d['coords'][..., j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C')
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               cifar.img_size).reshape(
                                                   (16), order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][..., j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), d)
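
A usage sketch with placeholder paths; data_category follows the 'istest' convention checked at the top of the function:

ReadDataToCifarDic(imgdir='/path/to/frames',
                   example_path='/path/to/examples.mat',
                   data_category='istest',
                   max_per_batch=1000,
                   save_dir='/path/to/cifar_batches')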
Example #9
def ReadDataToHMLPEDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
    Read all data in 'data_category'
    into HMLPE dictionary
    There is no need to generate training data here, since it can be generated in
    hmlpe.py
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print 'Warn: The correctness of data type %s is not guaranteed' % data_category
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {'data':(112,112,3), 'part_indmap':(8,8), 'joint_indmap':(8,8)} 
    nparts  = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4,buf_size), dtype=np.int)
    d['coords'] = np.ndarray((2,29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1),order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
            mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
            bid = bid + 1
            j = 0  # reset the within-batch index before filling the next buffer
            if ndata - i < max_per_batch:
                d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][...,j] = examples[i]['coords']
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox, img.size) 
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C') - 1 # to python stype 0-idx
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox, dimdic['data']).reshape((8,2),order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape((dsize),order='F') 
        d['data'][...,j] = data_img
        d['indmap'][...,j] = hmlpe.HMLPE.create_part_indicatormap(d['joints8'][...,j], hmlpe.part_idx, dimdic['part_indmap'], 0.3, 30.0,  12.0)
        d['joint_indmap'][...,j] = hmlpe.HMLPE.create_joint_indicatormap(d['joints8'][...,j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][...,j] = hmlpe.HMLPE.makejointmask(dimdic['data'], d['joints8'][...,j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
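
The HMLPE variant is called the same way; again the paths are placeholders and 'istest' is the only category the function claims to handle reliably:

ReadDataToHMLPEDic(imgdir='/path/to/frames',
                   example_path='/path/to/examples.mat',
                   data_category='istest',
                   max_per_batch=1000,
                   save_dir='/path/to/hmlpe_batches')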
Example #10
def ReadCropImageToHMLPEDic(dataset_dir, save_dir, istrain = False, isOC = True):
    """
    This function will be used for generating testing data,
    because training and testing data have different formats in oribbox.

    For generating training samples, please use
    create_lsp_regression_data.m
    (dataset_dir, type=3, opt)
      opt.OC = ?

    and hmlpe.py
    """
    import iutils as iu
    import iread.hmlpe as hmlpe
    import iread.myio as mio
    import scipy.io as sio
    from PIL import Image
    ndata = 1000
    if istrain:
        s_idx = 0
    else:
        s_idx = 1000
    imgdir = iu.fullfile(dataset_dir, 'images-crop')
    if isOC:
        dmat = sio.loadmat(iu.fullfile(dataset_dir, 'jointsOC.mat'))
    else:
        dmat = sio.loadmat(iu.fullfile(dataset_dir, 'joints-crop.mat'))
    lsp_jt = dmat['joints']
    dimdic = {'data':(112,112,3), 'part_indmap':(8,8), 'joint_indmap': (8,8)}
    nparts = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, ndata, nparts, njoints)
    d['data'] = d['data'].reshape((-1,ndata),order='F')
    d['is_mirror'][:] = False
    d['is_positive'][:] = True
    for idx in range(s_idx, s_idx + ndata):
        imgpath = iu.fullfile(imgdir, 'im%04d.jpg' % (idx + 1))
        img = Image.open(imgpath)
        i = idx - s_idx
        orijoints8, isvisible = GetJoints8(lsp_jt[...,idx]) 
        bbox = GetUpperBodyBox(img.size)
        img_arr = np.asarray(img)[bbox[1]:bbox[3], bbox[0]:bbox[2],:]
        s = np.asarray([(dimdic['data'][1]-1.0)/(bbox[2] - bbox[0]),(dimdic['data'][0]-1.0)/(bbox[3]-bbox[1])]).reshape((1,2))
        tjoints = (orijoints8 - bbox[0:2,:].reshape((1,2)))*s
        masks = hmlpe.HMLPE.makejointmask(dimdic['data'], tjoints)
        d['data'][...,i] = np.asarray(Image.fromarray(img_arr).resize((dimdic['data'][0], dimdic['data'][1]))).reshape((-1,1),order='F').flatten()
        d['joints8'][...,i] =  tjoints
        d['jointmasks'][...,i] = np.logical_and(masks, isvisible)
        d['filenames'][i] = imgpath
        d['oribbox'][...,i] = bbox.flatten()
        d['indmap'][...,i] = hmlpe.HMLPE.create_part_indicatormap(tjoints, hmlpe.part_idx, dimdic['part_indmap'], 0.3, 30.0, 12.0)
        d['joint_indmap'][...,i] = hmlpe.HMLPE.create_joint_indicatormap(tjoints, dimdic['joint_indmap'], 30.0, 12.0)
    mio.pickle(iu.fullfile(save_dir, 'data_batch_1'), d)
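
A sketch of generating the test batch from a cropped LSP-style dataset; the dataset directory is a placeholder and the flags follow the docstring above:

# istrain=False selects indices 1000..1999; isOC=True loads jointsOC.mat.
ReadCropImageToHMLPEDic('/path/to/lsp_dataset', '/path/to/output_batches',
                        istrain=False, isOC=True)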
Example #11
 def __init__(self, options = None):
     self.cv2 = __import__('cv2')
     self.igeo = __import__('ipyml').geometry
     if options is None:
         self.model_dir =  '/opt/visal/tmp/for_sijin/Data/opencv_trained_models'
     else:
         self.model_dir = options['model_dir']
     self.face_cascade = dict()
     self.face_cascade['frontal'] = self.cv2.CascadeClassifier(iu.fullfile(self.model_dir, \
                                                                'haarcascade_frontalface_default.xml'))
     self.face_cascade['profile'] = self.cv2.CascadeClassifier(iu.fullfile(self.model_dir, \
                                                                        'haarcascade_profileface.xml'))
Example #12
def ReadDataToCifarDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print "I haven't implemented the joints8 part"
        #raise ModecError("I haven't implemented the joints8 part")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4,s_first),dtype=np.int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2,29,s_first),dtype=np.float32)
    tdsize= cifar.img_size[0] # make sure img_size[0] == img_size[1]
    
    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata-i)                
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][...,j] = tbox
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox,img.size)
        d['coords'][...,j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C')
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox,cifar.img_size).reshape((16),order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][...,j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)),d)
Example #13
 def __init__(self, options=None):
     self.cv2 = __import__('cv2')
     self.igeo = __import__('ipyml').geometry
     if options is None:
         self.model_dir = '/opt/visal/tmp/for_sijin/Data/opencv_trained_models'
     else:
         self.model_dir = options['model_dir']
     self.face_cascade = dict()
     self.face_cascade['frontal'] = self.cv2.CascadeClassifier(iu.fullfile(self.model_dir, \
                                                                'haarcascade_frontalface_default.xml'))
     self.face_cascade['profile'] = self.cv2.CascadeClassifier(iu.fullfile(self.model_dir, \
                                                                        'haarcascade_profileface.xml'))
Example #14
def test_addmeta(metadir):
    metapath = iu.fullfile(metadir, 'batches.meta')
    d_meta = myio.unpickle(metapath)
    d_meta['ind_dim'] = dict()
    d_meta['ind_dim']['part_indmap'] = (8, 8)
    d_meta['ind_dim']['joint_indmap'] = (8, 8)
    myio.pickle(metapath, d_meta)
Example #15
def collect_feature_meta(folder, re_exp='batch_feature_\w+$'):
    allfile = sorted(iu.getfilelist(folder, re_exp), key=lambda x:extract_batch_num(x))
    feature_list_lst = []
    feature_dim = None
    indexes_lst = []
    feature_names = None
    if len(allfile) == 0:
        return dict()
    for f in allfile:
        print f
        p =  iu.fullfile(folder, f)
        d = mio.unpickle(p)
        feature_list_lst += [d['feature_list']]
        if feature_dim:
            if feature_dim!= d['feature_dim']:
                raise Exception('feature dim inconsistent')
        else:
            feature_dim = d['feature_dim']
        indexes_lst += [d['info']['indexes']]
    indexes = np.concatenate(indexes_lst)
    n_feature, n_batch = len(feature_dim), len(allfile)
    feature_list = [np.concatenate([feature_list_lst[i][k] for i in range(n_batch)],
                                   axis=-1)
                    for k in range(n_feature)]
    return {'feature_list':feature_list, 'feature_dim':feature_dim, 'info':{'indexes':indexes,
                                                                        'feature_names':d['info']['feature_names']}}
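
A short sketch of reading the merged feature meta back; the folder is a placeholder and the returned keys follow the dictionary assembled above:

meta = collect_feature_meta('/path/to/feature_batches')
if meta:   # an empty dict means no batch_feature_* files were found
    print 'n_features = %d' % len(meta['feature_list'])
    print meta['feature_dim'], meta['info']['indexes'].shape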
Example #16
def pack_01():
    """
    input:   fc_j0 feature,  rel_pose
    outputs: rel_pose, fc_j0_feature
    """
    source_feature_network_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/2015_02_02_acm_act_14_exp_2_19_graph_0012/'
    source_meta_path = '/opt/visal/tmp/for_sijin/tmp/tmp_saved'
    
    exp_meta_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta'
    save_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14'
    feature_name = 'Relative_Y3d_mono_body'
    res = dict()
    exp_meta = mio.unpickle(exp_meta_path)
    source_meta = dutils.collect_feature_meta(source_meta_path)
    rel_pose = exp_meta[feature_name]
    fc_j0_feature = source_meta['feature_list'][1]
    rel_gt = source_meta['feature_list'][0]

    diff = rel_gt.reshape((-1, rel_gt.shape[-1]),order='F') * 1200 - rel_pose
    print 'diff is {}'.format(diff.flatten().sum())
    feature_list =  [rel_pose, fc_j0_feature]
    feature_dim = [rel_pose.shape[0], fc_j0_feature.shape[0]]
    print feature_dim, '<<<feature dim'
    res = {'feature_list': feature_list, 'feature_dim':feature_dim,
           'info':{'indexes':source_meta['info']['indexes'],
                   'max_depth': 1200.0}}
    indexes = res['info']['indexes']
    res['info']['soure_feature_network_path'] = source_feature_network_path
    print indexes[:10], min(indexes), max(indexes)
    print 'The number of data is {} == {}'.format(indexes.size, feature_list[0].shape[-1])
    iu.ensure_dir(save_path)
    mio.pickle(iu.fullfile(save_path, 'batches.meta'), res)
Example #17
def gbns(name,sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model  = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W= layers[scale_name][2]['weights'][0]
    b= layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    if 'epsilon' in layers[norm_name][2]: 
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
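
The returned value folds a mean/variance normalization layer into the scale layer that follows it: if the normalization computes x' = (x - u) / sqrt(var + epsilon) and the scale layer computes W * x' + b, the fused layer can use W' = W / sqrt(var + epsilon) (the quantity computed by the companion gwns helper shown in a later example) together with the bias b' = b - W * u / sqrt(var + epsilon) returned here, since W' * x + b' = W * x' + b. A small self-contained numerical check of that identity, with toy values:

import numpy as np

W, b = np.array([2.0, 0.5]), np.array([0.1, -0.3])         # scale layer parameters
u, var, eps = np.array([1.0, 4.0]), np.array([9.0, 0.25]), 1e-6
x = np.array([5.0, 2.0])                                    # an arbitrary input

two_layer = W * (x - u) / np.sqrt(var + eps) + b            # normalize, then scale
fused = (W / np.sqrt(var + eps)) * x + (b - W * u / np.sqrt(var + eps))
print np.allclose(two_layer, fused)                         # True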
Example #18
 def parse_params(self, params):
     DataProvider.parse_params(self, params)
     if self.data_dic is None:
         if 'data_path' in params:
             self.data_dic = mio.unpickle(iu.fullfile(params['data_path'], 'batches.meta'))
         else:
             raise Exception('data-path is missing')
Example #19
def gbns(name, sp, params):
    """
    get bias for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    model = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W = layers[scale_name][2]['weights'][0]
    b = layers[scale_name][2]['biases'][0]
    print 'W-------------'
    iu.print_common_statistics(W)
    print 'b'
    iu.print_common_statistics(b)
    if 'epsilon' in layers[norm_name][2]:
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [b - W * u / (np.sqrt(var + epsilon))]
Example #20
def test_CroopedDHMPLEJointDataWarper():
    data_dir = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2'
    data_path = iu.fullfile(data_dir, 'batches.meta')
    params = {'batch_size': 1024, 'data_path': data_dir}
    data_dic = None
    train = False
    data_range = range(0, 132744)
    test_range = range(132744, 162008)
    dp = CroppedDHMLPEJointDataWarper(data_dic, train, data_range, params)
    epoch, batchnum, alldata = dp.get_next_batch()
    print ' {}.{} [] of {}'.format(epoch, batchnum, len(alldata))

    show_idx = 100
    img = np.require(dp.get_plottable_data(alldata[0][..., show_idx].reshape(
        (-1, 1), order='F')),
                     dtype=np.uint8)
    sp = img.shape
    img = img.reshape((sp[0], sp[1], sp[2]), order='F')
    print np.max(img.flatten())
    print np.min(img.flatten())
    pl.subplot(2, 1, 1)
    pl.imshow(img)
    pl.subplot(2, 1, 2)
    img = dp.cropped_mean_image
    pl.imshow(img / 255.0)
    pl.show()
Example #21
def test_addmeta(metadir):
    metapath = iu.fullfile(metadir, 'batches.meta')
    d_meta = myio.unpickle(metapath)
    d_meta['ind_dim'] = dict()
    d_meta['ind_dim']['part_indmap'] = (8,8)
    d_meta['ind_dim']['joint_indmap'] = (8,8)
    myio.pickle(metapath, d_meta)    
Example #22
 def parse_params(self, params):
     DataProvider.parse_params(self, params)
     if self.data_dic is None:
         if 'data_path' in params:
             self.data_dic = mio.unpickle(
                 iu.fullfile(params['data_path'], 'batches.meta'))
         else:
             raise Exception('data-path is missing')
Example #23
def collect_feature(folder, item, re_exp='batch_feature_\w+$'):
    allfile = sorted(iu.getfilelist(folder, re_exp), key=lambda x:extract_batch_num(x))
    l = []
    for f in allfile:
        p =  iu.fullfile(folder, f)
        d = mio.unpickle(p)
        l = l + [d[item]]
    return np.concatenate(l, axis=1)
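
Usage sketch with a placeholder folder; the item name depends on what the batch_feature files actually store, so 'feature' below is hypothetical:

features = collect_feature('/path/to/feature_batches', 'feature')  # hypothetical key
print features.shape   # batches are concatenated along axis 1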
Example #24
def ExtractAnnotation(annot_dir):
    import iutils as iu
    eplist = [2, 3, 4, 5, 6]
    d = dict()
    for e in eplist:
        txtname = iu.fullfile(annot_dir, 'buffy_s5e' + str(e) + '_sticks.txt')
        ed = ReadBuffyTxt(txtname)
        d[e] = ed
    return d
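
A small sketch; the annotation directory is a placeholder and the episode keys follow eplist above:

annot = ExtractAnnotation('/path/to/buffy_annotations')
print sorted(annot.keys())   # [2, 3, 4, 5, 6], one entry per episode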
Example #25
def ExtractAnnotation(annot_dir):
    import iutils as iu
    eplist = [2,3,4,5,6]
    d = dict()
    for e in eplist:
        txtname = iu.fullfile(annot_dir, 'buffy_s5e' + str(e) + '_sticks.txt')
        ed = ReadBuffyTxt(txtname)
        d[e] = ed
    return d
Example #26
 def __init__(self, imgdir):
     self.Image = __import__('Image')
     self.imgdir = imgdir
     self.cur_idx = -1
     self.images_path = [iu.fullfile(imgdir, x) for x in \
                         sorted(iu.getfilelist(imgdir, '.*\.(jpg|png)'))]
     if len(self.images_path) == 0:
          raise DemoError('I cannot find any image under %s' % self.imgdir)
     print 'I got %d images' % len(self.images_path)   
     ICameraBasic.__init__(self)
Example #27
 def __init__(self, imgdir):
     self.Image = __import__('Image')
     self.imgdir = imgdir
     self.cur_idx = -1
     self.images_path = [iu.fullfile(imgdir, x) for x in \
                         sorted(iu.getfilelist(imgdir, '.*\.(jpg|png)'))]
     if len(self.images_path) == 0:
          raise DemoError('I cannot find any image under %s' % self.imgdir)
     print 'I got %d images' % len(self.images_path)
     ICameraBasic.__init__(self)
Example #28
def cvt1(source_exp_name, target_exp_name):
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(
        iu.fullfile(base_path, 'folder_%s' % source_exp_name, 'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name)
    target_meta_path = iu.fullfile(target_meta_folder, 'batches.meta')
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
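
The experiment names in the function's own banner suggest a call like the following (the source meta folder under base_path must already exist):

cvt1('SP_t004_act_14', 'Raw_SP_t004_act_14')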
Example #29
def cvt1(source_exp_name, target_exp_name):
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(iu.fullfile(base_path, 'folder_%s' % source_exp_name,
                                           'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name) 
    target_meta_path =  iu.fullfile(target_meta_folder, 'batches.meta') 
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
Example #30
def merge_batch_data(data_dir_list,
                     save_dir,
                     is_symbolic=True,
                     batch_start_num=1):
    """
    This function will merge all the data_batches in data_dir_list into one folder
    and rename them accordingly.
    Of course, the meta data will be updated.
    """
    import os
    import shutil
    iu.ensure_dir(save_dir)
    meta = None
    for ddir in data_dir_list:
        cur_meta = myio.unpickle(iu.fullfile(ddir, 'batches.meta'))
        meta = HMLPE.merge_meta(meta, cur_meta)

    myio.pickle(iu.fullfile(save_dir, 'batches.meta'), meta)
    cur_id = batch_start_num
    for ddir in data_dir_list:
        all_file = iu.getfilelist(ddir, 'data_batch_\d+')
        print 'I find %d batches in %s' % (len(all_file), ddir)
        if is_symbolic:
            for fn in all_file:
                sn = iu.fullfile(save_dir, 'data_batch_%d' % cur_id)
                if iu.exists(sn, 'file'):
                    os.remove(sn)
                os.symlink(iu.fullfile(ddir, fn), sn)
                cur_id = cur_id + 1
        else:
            for fn in all_file:
                shutil.copyfile(
                    iu.fullfile(ddir, fn),
                    iu.fullfile(save_dir, 'data_batch_%d' % cur_id))
                cur_id = cur_id + 1
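
A sketch merging two hypothetical batch folders into one via symlinks:

merge_batch_data(['/path/to/batches_part1', '/path/to/batches_part2'],
                 '/path/to/merged_batches',
                 is_symbolic=True, batch_start_num=1)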
Example #31
def merge_batch_data(data_dir_list, save_dir, is_symbolic = True, batch_start_num = 1):
    """
    This function will merge all the data_batches in data_dir_list into one folder
    and rename them accordingly.
    Of course, the meta data will be updated.
    """
    import os
    import shutil
    iu.ensure_dir(save_dir)
    meta = None
    for ddir in data_dir_list:
        cur_meta = myio.unpickle(iu.fullfile(ddir, 'batches.meta'))    
        meta = HMLPE.merge_meta(meta, cur_meta)

    myio.pickle(iu.fullfile(save_dir, 'batches.meta'), meta)
    cur_id = batch_start_num
    for ddir in data_dir_list:
        all_file = iu.getfilelist(ddir, 'data_batch_\d+')
        print 'I find %d batches in %s' % (len(all_file), ddir)
        if is_symbolic:
            for fn in all_file:
                sn = iu.fullfile(save_dir, 'data_batch_%d' %  cur_id)
                if iu.exists(sn, 'file'):
                    os.remove(sn)
                os.symlink(iu.fullfile(ddir, fn), sn)
                cur_id = cur_id + 1
        else:
            for fn in all_file:
                shutil.copyfile(iu.fullfile(ddir, fn), iu.fullfile(save_dir, 'data_batch_%d' %  cur_id))
                cur_id = cur_id + 1
Example #32
 def generate_data(self, generate_type, allfile = None):
     """
     generate_type = 'rt' only
     """
     if allfile is None:
         allfile = iu.getfilelist( self.imgdata_info['imgdata_path'], '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (self.imgdata_info['imgdata_path'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     ndata = 0
     self.meta = {'imgdata_info':self.imgdata_info,'savedata_info':self.savedata_info}
     self.meta['num_vis'] = iu.prod(self.savedata_info['newdim'])
     self.meta['data_sum'] = 0
     self.meta['ndata'] = 0
     self.meta['nparts'] = len(part_idx) 
     for fn in allfile:
         if generate_type == 'rt':
             mpath = iu.fullfile(self.imgdata_info['imgdata_path'], fn)
             self.generate_rt_data(iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean']  = self.meta['data_sum'] / self.meta['ndata']
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)
Example #33
 def generate_positive_data(self, generate_type, allfile = None):
     """
     generate_type = 'rt': random translation
                      'ct': center block
     """
     if allfile is None:
         allfile = iu.getfilelist( self.imgdata_info['imgdatapath'], '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (self.imgdata_info['imgdatapath'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     self.init_meta(generate_type)
     print self.meta
     np.random.seed(7)
     for fn in allfile:
         print 'Processing %s ' % fn
         mpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
         self.generate_positive_data_from_mat(generate_type ,iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean']  = self.meta['data_sum'] / self.meta['ndata']
         self.meta['data_mean'] = self.meta['data_mean'].reshape((-1,1))
     else:
         self.meta['data_mean'] = 0
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)
Example #34
    def do_accveval(self):
        images_folder = self.op.get_value('images_folder')
        # get all jpg file in images_folder
        allfiles = iu.getfilelist(images_folder, '.*\.jpg')
        images_path = [iu.fullfile(images_folder, p) for p in allfiles]
        n_image = len(images_path)
        images = self.load_images(images_path)
        mean_image_path = self.op.get_value('mean_image_path')
        mean_image = sio.loadmat(mean_image_path)['cropped_mean_image']
        mean_image_arr = mean_image.reshape((-1, 1), order='F')
        input_images = images - mean_image_arr
        # pack input images into batch data
        data = [
            input_images,
            np.zeros((51, n_image), dtype=np.single),
            np.zeros((1700, n_image), dtype=np.single)
        ]
        # allocate the buffer for prediction
        pred_buffer = np.zeros((n_image, 51), dtype=np.single)
        data.append(pred_buffer)

        ext_data = [
            np.require(elem, dtype=np.single, requirements='C')
            for elem in data
        ]
        # run the model
        ## get the joint prediction layer indexes
        self.pred_layer_idx = self.get_layer_idx('fc_j2', check_type='fc')
        self.libmodel.startFeatureWriter(ext_data, self.pred_layer_idx)
        self.finish_batch()

        raw_pred = ext_data[-1].T
        pred = dhmlpe_features.convert_relskel2rel(raw_pred) * 1200.0

        # show the first prediction
        show_idx = 0
        img = np.array(Image.open(images_path[show_idx]))
        fig = pl.figure(0)
        ax1 = fig.add_subplot(121)
        ax1.imshow(img)
        ax2 = fig.add_subplot(122, projection='3d')
        cur_pred = pred[..., show_idx].reshape((3, -1), order='F')
        part_idx = iread.h36m_hmlpe.part_idx
        params = {'elev': -94, 'azim': -86, 'linewidth': 6, 'order': 'z'}
        dutils.show_3d_skeleton(cur_pred.T, part_idx, params)
Example #35
def gwns(name, sp_list, params):
    """
    get weights for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    print 'stat keys = {}'.format(stat['layers'].keys())
    model = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W = layers[scale_name][2]['weights']
    if 'epsilon' in layers[norm_name][2]:
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    # u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [W[0] / np.sqrt(var + epsilon)]
Example #36
def gwns(name, sp_list, params):
    """
    get weights for combining norm and scale layer
    params[0] = model_folder
    params[1] = norm layer name  [source]
    params[2] = scale layer name [source]
    """
    model_folder, norm_name, scale_name = params[0], params[1], params[2]
    stat_folder = iu.fullfile(model_folder, 'stat')
    stat_path = Solver.get_saved_model_path(stat_folder)
    stat = mio.unpickle(stat_path)
    print 'stat keys = {}'.format(stat['layers'].keys())
    model  = Solver.get_saved_model(model_folder)
    layers = get_layers(model)
    W= layers[scale_name][2]['weights']
    if 'epsilon' in layers[norm_name][2]: 
        epsilon = layers[norm_name][2]['epsilon']
    else:
        epsilon = 1e-6
    # u = stat['layers'][norm_name]['u'].flatten()
    var = stat['layers'][norm_name]['var'].flatten()
    return [W[0] / np.sqrt(var + epsilon)]
Example #37
    def do_accveval(self):
        images_folder = self.op.get_value('images_folder')
        # get all jpg file in images_folder
        allfiles = iu.getfilelist(images_folder, '.*\.jpg')
        images_path = [iu.fullfile(images_folder, p) for p in allfiles]
        n_image = len(images_path)
        images = self.load_images(images_path)
        mean_image_path = self.op.get_value('mean_image_path')
        mean_image = sio.loadmat(mean_image_path)['cropped_mean_image']
        mean_image_arr = mean_image.reshape((-1,1),order='F')
        input_images = images - mean_image_arr
        # pack input images into batch data
        data = [input_images, np.zeros((51,n_image),dtype=np.single),
                np.zeros((1700,n_image), dtype=np.single)]
        # allocate the buffer for prediction
        pred_buffer = np.zeros((n_image, 51),dtype=np.single)
        data.append(pred_buffer)

        ext_data = [np.require(elem,dtype=np.single, requirements='C') for elem in data]
        # run the model
        ## get the joint prediction layer indexes
        self.pred_layer_idx = self.get_layer_idx('fc_j2',check_type='fc')
        self.libmodel.startFeatureWriter(ext_data, self.pred_layer_idx)
        self.finish_batch()

        raw_pred = ext_data[-1].T
        pred = dhmlpe_features.convert_relskel2rel(raw_pred) * 1200.0

        # show the first prediction
        show_idx = 0
        img = np.array(Image.open(images_path[show_idx]))
        fig = pl.figure(0)
        ax1 = fig.add_subplot(121)
        ax1.imshow(img)
        ax2 = fig.add_subplot(122,projection='3d')
        cur_pred = pred[..., show_idx].reshape((3,-1),order='F')
        part_idx = iread.h36m_hmlpe.part_idx
        params =  {'elev':-94, 'azim':-86, 'linewidth':6, 'order':'z'}
        dutils.show_3d_skeleton(cur_pred.T, part_idx, params)
Example #38
def pack_01():
    """
    input:   fc_j0 feature,  rel_pose
    outputs: rel_pose, fc_j0_feature
    """
    source_feature_network_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/2015_02_02_acm_act_14_exp_2_19_graph_0012/'
    source_meta_path = '/opt/visal/tmp/for_sijin/tmp/tmp_saved'

    exp_meta_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta'
    save_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14'
    feature_name = 'Relative_Y3d_mono_body'
    res = dict()
    exp_meta = mio.unpickle(exp_meta_path)
    source_meta = dutils.collect_feature_meta(source_meta_path)
    rel_pose = exp_meta[feature_name]
    fc_j0_feature = source_meta['feature_list'][1]
    rel_gt = source_meta['feature_list'][0]

    diff = rel_gt.reshape((-1, rel_gt.shape[-1]), order='F') * 1200 - rel_pose
    print 'diff is {}'.format(diff.flatten().sum())
    feature_list = [rel_pose, fc_j0_feature]
    feature_dim = [rel_pose.shape[0], fc_j0_feature.shape[0]]
    print feature_dim, '<<<feature dim'
    res = {
        'feature_list': feature_list,
        'feature_dim': feature_dim,
        'info': {
            'indexes': source_meta['info']['indexes'],
            'max_depth': 1200.0
        }
    }
    indexes = res['info']['indexes']
    res['info']['soure_feature_network_path'] = source_feature_network_path
    print indexes[:10], min(indexes), max(indexes)
    print 'The number of data is {} == {}'.format(indexes.size,
                                                  feature_list[0].shape[-1])
    iu.ensure_dir(save_path)
    mio.pickle(iu.fullfile(save_path, 'batches.meta'), res)
Example #39
def process(op):
    data_folder = op.get_value('load_file')
    save_path = op.get_value('save_path')
    # data_folder = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat_2000'
    all_files = iu.getfilelist(data_folder, '\d+@\d+$')
    print all_files
    d = mio.unpickle(iu.fullfile(data_folder, all_files[0]))
    ms = d['model_state']
    if op.get_value('cost_name') is not None:
        cost_names = op.get_value('cost_name').split(',')
        n_cost = len(cost_names)
    else:
        n_cost = len(d['solver_params']['train_error'][0])
        cost_names = d['solver_params']['train_error'][0].keys()
    print 'Start to plot'
    start_time = time()
    for i in range(n_cost):
        pl.subplot(n_cost, 1, i + 1)
        plot_cost(op, d, cost_names[i])
    print 'Cost {} seconds '.format(time()- start_time)
    if save_path:
        imgproc.imsave_tight(save_path)
    pl.show()
Example #40
def process(op):
    data_folder = op.get_value('load_file')
    save_path = op.get_value('save_path')
    # data_folder = '/public/sijinli2/ibuffer/2015-01-16/net2_test_for_stat_2000'
    all_files = iu.getfilelist(data_folder, '\d+@\d+$')
    print all_files
    d = mio.unpickle(iu.fullfile(data_folder, all_files[0]))
    ms = d['model_state']
    if op.get_value('cost_name') is not None:
        cost_names = op.get_value('cost_name').split(',')
        n_cost = len(cost_names)
    else:
        n_cost = len(d['solver_params']['train_error'][0])
        cost_names = d['solver_params']['train_error'][0].keys()
    print 'Start to plot'
    start_time = time()
    for i in range(n_cost):
        pl.subplot(n_cost, 1, i + 1)
        plot_cost(op, d, cost_names[i])
    print 'Cost {} seconds '.format(time() - start_time)
    if save_path:
        imgproc.imsave_tight(save_path)
    pl.show()
Example #41
def test_CroopedDHMPLEJointDataWarper():
    data_dir = "/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2"
    data_path = iu.fullfile(data_dir, "batches.meta")
    params = {"batch_size": 1024, "data_path": data_dir}
    data_dic = None
    train = False
    data_range = range(0, 132744)
    test_range = range(132744, 162008)
    dp = CroppedDHMLPEJointDataWarper(data_dic, train, data_range, params)
    epoch, batchnum, alldata = dp.get_next_batch()
    print " {}.{} [] of {}".format(epoch, batchnum, len(alldata))

    show_idx = 100
    img = np.require(dp.get_plottable_data(alldata[0][..., show_idx].reshape((-1, 1), order="F")), dtype=np.uint8)
    sp = img.shape
    img = img.reshape((sp[0], sp[1], sp[2]), order="F")
    print np.max(img.flatten())
    print np.min(img.flatten())
    pl.subplot(2, 1, 1)
    pl.imshow(img)
    pl.subplot(2, 1, 2)
    img = dp.cropped_mean_image
    pl.imshow(img / 255.0)
    pl.show()
Example #42
def shuffle_data(source_dir, target_dir, max_per_file = 4000):
    """
    This function will shuffle all the data in source_dir
    and save it to target_dir
    """
    if source_dir == target_dir:
        raise HMLPEError('source dir can not be the same as target dir')
    import shutil
    import sys
    iu.ensure_dir( target_dir)
    shutil.copy(iu.fullfile(source_dir, 'batches.meta'), \
                iu.fullfile(target_dir, 'batches.meta'))
    meta = myio.unpickle(iu.fullfile(source_dir, 'batches.meta'))
    ndata = meta['ndata']
    nbatch = (ndata  - 1) / max_per_file + 1
    nparts = meta['nparts']
    njoints = meta['njoints']
    newdim = meta['savedata_info']['newdim']
    filter_size = meta['savedata_info']['indmap_para']['filter_size']
    stride = meta['savedata_info']['indmap_para']['stride']
    joint_filter_size = meta['savedata_info']['indmap_para']['joint_filter_size']
    joint_stride = meta['savedata_info']['indmap_para']['joint_stride']
    mdim = HMLPE.get_indmapdim(newdim, filter_size, stride)
    jtmdim = HMLPE.get_indmapdim(newdim, joint_filter_size, joint_stride)
    print('There are %d data in total, I need %d batches to hold them' % (ndata, nbatch))
    print 'Begin creating empty files'
    rest = ndata
    d = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, max_per_file, nparts, njoints)
    HMLPE.adjust_savebuffer_shape(d)
    for b in range(nbatch):
        cur_n = min(max_per_file, rest)
        if b != nbatch - 1:
            saved = d
        else:
            saved = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, cur_n, nparts, njoints)
            HMLPE.adjust_savebuffer_shape(saved)
        myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)), saved)
        rest = rest - cur_n
    print 'End creating'
    allbatchfn = iu.getfilelist(source_dir, 'data_batch_\d+')
    np.random.seed(7)
    perm = range(ndata)
    np.random.shuffle(perm)
    buf_cap = 12 # store at most twelve batches in memory
    nround = (nbatch - 1)/buf_cap + 1
    for rd in range(nround):
        print ('Round %d of %d' % (rd,nround))
        buf = dict()
        offset = 0
        for fn in allbatchfn:
            print( 'Processing %s' % fn )
            d = myio.unpickle(iu.fullfile(source_dir, fn))
            cur_n = d['data'].shape[-1]
            for b in range(rd * buf_cap, min(nbatch, (rd+1)*buf_cap)):
                sys.stdout.write('\rpadding %d of %d' % (b + 1, nbatch))
                sys.stdout.flush() 
                sidx = b * max_per_file
                eidx = min(ndata, sidx + max_per_file)
                cur_idx_list = [i for i in range(cur_n) if perm[offset + i] >= sidx and perm[offset + i] < eidx]
                if len(cur_idx_list) == 0:
                    continue
                if not b in buf:
                    dsave = myio.unpickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)))
                    buf[b] = dsave
                else:
                    dsave = buf[b]
                save_idx_list = [perm[ x + offset] - sidx for x in cur_idx_list]
                HMLPE.selective_copydic(d, dsave, cur_idx_list, save_idx_list)
                # myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), dsave)
            print 'Finished %s' % fn
            offset = offset + cur_n
        for b in range(rd * buf_cap, min(nbatch, (rd+1)*buf_cap)):
            myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), buf[b])
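
Usage sketch with placeholder directories; source and target must differ, as enforced at the top of the function:

shuffle_data('/path/to/ordered_batches', '/path/to/shuffled_batches',
             max_per_file=4000)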
Example #43
    def generate_negative_data_from_image(self, generate_type, allfile=None):
        """
        generate_type = 'neg_sample'
        savedata_info should have 'neg_sample_num':
                      indicating how many negative windows to sample per image.
        If an image is small, it will generate as many as possible.
        """
        import Image
        if allfile is None:
            allfile = iu.getfilelist(self.imgdata_info['imgdatapath'], \
                                     '\w+(\.png|\.jpg|\.pgm|.jpeg)')
        print 'imgdatapath=%s, %d images are found' % (
            self.imgdata_info['imgdatapath'], len(allfile))
        iu.ensure_dir(self.savedata_info['savedir'])
        savedir = self.savedata_info['savedir']
        self.batch_id = self.savedata_info['start_patch_id']
        self.init_meta(generate_type)
        print(self.meta)
        sample_num = self.savedata_info['neg_sample_num']
        totaldata = len(allfile) * sample_num
        self.meta['ndata'] = 0
        newdim = self.savedata_info['newdim']
        nparts = self.meta['nparts']
        njoints = self.meta['njoints']
        if njoints == 8:
            dicjtname = 'joints8'
        else:
            dicjtname = 'joints'
            #raise HMLPEError('njoints = %d are not supported yet' % njoints)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride = self.savedata_info['indmap_para']['stride']
        #rate = self.savedata_info['indmap_para']['rate']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        self.meta['ind_dim']['part_indmap'] = mdim
        joint_filter_size = self.savedata_info['indmap_para'][
            'joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        self.meta['ind_dim']['joint_indmap'] = jtmdim
        per_size = min(totaldata, self.savedata_info['max_batch_size'])
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, per_size, nparts, njoints)
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        pre_nc = 0
        nc = 0
        np.random.seed(7)
        for it, fn in enumerate(allfile):
            print('Processing %s' % fn)
            curimgpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
            img = np.asarray(Image.open(curimgpath), dtype=np.uint8)
            imgdim = img.shape
            if imgdim[0] < newdim[0] or imgdim[1] < newdim[1]:
                print('small image, ignored')
                continue
            mesh = self.create_augumentation_mesh(imgdim, newdim,
                                                  generate_type)
            ts = min(len(mesh), sample_num)
            l = (np.random.permutation(range(len(mesh))))[:ts]
            for p in l:
                r, c = mesh[p]
                timg = img[r:r + newdim[0], c:c + newdim[0], :]
                res['data'][..., nc - pre_nc] = timg
                res['joint_sample_offset'][..., nc - pre_nc] = [c, r]
                res['filenames'][nc - pre_nc] = curimgpath
                res['oribbox'][..., nc - pre_nc] = [
                    c, r, c + newdim[1] - 1, r + newdim[0] - 1
                ]
                nc = nc + 1
            if sample_num + nc - pre_nc > per_size or it == len(allfile) - 1:
                tmpres = self.truncated_copydic(res, nc - pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1, nc - pre_nc),
                                                        order='F')
                self.meta['data_sum'] += tmpres['data'].sum(axis=1,
                                                            dtype=float)
                self.meta['ndata'] += nc - pre_nc
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle(savepath, tmpres)
                self.batch_id = self.batch_id + 1
                pre_nc = nc
        if self.meta['ndata'] > 0:
            self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
            self.meta['data_mean'] = self.meta['data_mean'].reshape((-1, 1),
                                                                    order='F')
        else:
            self.meta['data_mean'] = 0
        del self.meta['data_sum']

        myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'),
                    self.meta)
Example #44
def ReadTestDataToCifarDic(imgdir, annot_dir, tech_report_path, save_path=None):
    """
    This program will use the detection windows provided by the author;
    image data are extracted from the extended detection window.
    return type: dictionary with the following keys
                    ep
                    fr
                    annotation
                    oriimg  
                    bbox
                    joints8          ! (coordinate w.r.t bounding box)
                    data             !
                    indmap           ! (all zeros)
    This is the previous version; the latest version is
      ReadTestDataToHMLPEDic
    """ 
    import Stickmen
    import scipy.io as sio
    import PIL
    import iutils as iu
    from PIL import Image
    import matplotlib.pyplot as plt
    import iconvnet_datacvt as icvt
    rp = sio.loadmat(tech_report_path)['techrep2010_buffy']
    d_annot = ExtractAnnotation(annot_dir)
    ndata = rp.shape[1]
    data = []
    tsize = 112 # the size of test image
    indsize = 8
    data = dict()
    ndet = sum([ (1 if rp[0,i][0].size==0 else rp[0,i][0].size) for i in range(ndata)]) 
    data['ep'] = np.ndarray([ndet], dtype=np.int)
    data['fr'] = np.ndarray([ndet], dtype=np.int)
    data['annotation'] = np.ndarray([4,6,ndet], dtype=np.float32)
    data['indmap'] = np.zeros([7,indsize,indsize, ndet], dtype=np.bool,order='F')
    data['indmap_para'] = (30,12)
    data['oribbox'] = np.ndarray([4, ndet], dtype=np.float32)
    data['data'] = np.ndarray([tsize, tsize, 3, ndet], order='F', dtype=np.uint8)
    data['joints8'] = np.ndarray([8,2,ndet], dtype=np.float32)
    data['oridet'] = np.ndarray([4,ndet],dtype=np.float32)
    data['imgdir'] = imgdir
    idx = 0   
    for i in range(ndata):
        ep = rp[0,i][1][0][0]
        fr = rp[0,i][2][0][0]
        imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
        img = Image.open(imgpath)
       
        if rp[0,i][0].size==0: # No detection found by the detector
            data['ep'][...,idx] = ep
            data['fr'][...,idx] = fr
            data['annotation'][...,idx] = Stickmen.ReorderStickmen(d_annot[ep][fr])           
            data['oribbox'][...,idx] = np.zeros([4],dtype=np.float32)
            data['joints8'][...,idx] = np.ones([8,2], dtype=np.float32) * tsize * 2
            data['data'][...,idx] = np.zeros([tsize,tsize,3], dtype=np.uint8)
            data['oridet'][...,idx] = np.zeros([4], dtype=np.float32)
            idx  = idx + 1
            continue
        for j in range(rp[0,i][0].size): 
            det = rp[0,i][0][0,j]['det'][0] # det = (x,y,x1,y1)
            data['oridet'][...,idx] = det 
            det = ExtendBndbox(det, img.size)
            arr_img = np.asarray(img)[det[1]:det[3]+1, det[0]:det[2]+1,:]
            res_img = Image.fromarray(arr_img).resize((tsize,tsize))
            data['ep'][...,idx] = ep
            data['fr'][...,idx] = fr
            data['annotation'][...,idx] = Stickmen.ReorderStickmen(d_annot[ep][fr])
            data['data'][...,idx] = np.asarray(res_img)
            tmppoints = icvt.convert_AHEcoor_to_joints8(data['annotation'][...,idx])
            
            data['joints8'][...,idx] = TransformPoints(tmppoints, det, np.asarray(res_img.size) - 1)
            data['oribbox'][...,idx] = det
            idx = idx + 1
            
    data['data'] = data['data'].reshape((-1, ndet), order='F')
    if save_path is not None:
        icvt.ut.pickle(save_path, data) 
    return data
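
# --- Added usage sketch (not part of the original example) ---
# ReadTestDataToCifarDic stores 'joints8' in the coordinate frame of the
# resized detection window. The helper below is a minimal, self-contained
# illustration of that kind of remapping, assuming TransformPoints shifts the
# points to the box origin and rescales them to the target size; the real
# TransformPoints lives elsewhere in the project and may differ in detail.
import numpy as np

def transform_points_sketch(points, bbox, new_size):
    """points: (n, 2) array of (x, y); bbox = (x0, y0, x1, y1); new_size = (w - 1, h - 1)."""
    points = np.asarray(points, dtype=np.float32)
    x0, y0, x1, y1 = bbox
    scale = np.asarray(new_size, dtype=np.float32) / \
            np.asarray([x1 - x0, y1 - y0], dtype=np.float32)
    return (points - np.asarray([x0, y0], dtype=np.float32)) * scale

# A joint at the centre of the box maps to the centre of a 112 x 112 crop.
print transform_points_sketch([[50, 30]], (0, 0, 100, 60), (111, 111))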
Example #45
0
    def generate_negative_data_from_image(self, generate_type, allfile=None):
        """
        generate_type = 'neg_sample'
        savedata_info should have 'neg_sample_num',
                      indicating how many negative windows to sample per image.
        If an image is too small, as many windows as possible will be generated.
        """
        import Image
        if allfile is None:
            allfile = iu.getfilelist(self.imgdata_info['imgdatapath'], \
                                     '\w+(\.png|\.jpg|\.pgm|\.jpeg)')
        print 'imgdatapath=%s, %d images are found' % (self.imgdata_info['imgdatapath'], len(allfile))
        iu.ensure_dir(self.savedata_info['savedir'])
        savedir = self.savedata_info['savedir']
        self.batch_id = self.savedata_info['start_patch_id']
        self.init_meta(generate_type)
        print(self.meta)
        sample_num = self.savedata_info['neg_sample_num']
        totaldata = len(allfile) * sample_num
        self.meta['ndata'] = 0
        newdim = self.savedata_info['newdim']
        nparts = self.meta['nparts']
        njoints = self.meta['njoints']
        if njoints == 8:
            dicjtname = 'joints8'
        else:
            dicjtname = 'joints'
            #raise HMLPEError('njoints = %d are not supported yet' % njoints)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride =  self.savedata_info['indmap_para']['stride']
        #rate = self.savedata_info['indmap_para']['rate']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        self.meta['ind_dim']['part_indmap'] = mdim
        joint_filter_size = self.savedata_info['indmap_para']['joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        self.meta['ind_dim']['joint_indmap'] = jtmdim
        per_size = min(totaldata, self.savedata_info['max_batch_size'])
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, per_size, nparts, njoints)
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        pre_nc = 0
        nc = 0
        np.random.seed(7)
        for it, fn in enumerate(allfile):
            print('Processing %s' % fn)
            curimgpath= iu.fullfile(self.imgdata_info['imgdatapath'], fn)
            img = np.asarray(Image.open(curimgpath), dtype=np.uint8)
            imgdim = img.shape
            if imgdim[0] < newdim[0] or imgdim[1] < newdim[1]:
                print('small image, ignored')
                continue
            mesh = self.create_augumentation_mesh(imgdim, newdim, generate_type)
            ts = min(len(mesh), sample_num)
            l = (np.random.permutation(range(len(mesh))))[:ts]
            for p in l:
                r, c = mesh[p]
                timg = img[r:r+newdim[0],c:c+newdim[1],:]
                res['data'][...,nc-pre_nc] = timg
                res['joint_sample_offset'][...,nc-pre_nc] = [c,r]
                res['filenames'][nc-pre_nc] = curimgpath
                res['oribbox'][...,nc-pre_nc] = [c,r,c+newdim[1]-1,r+newdim[0]-1]
                nc = nc + 1
            if sample_num + nc-pre_nc > per_size or it == len(allfile)-1:
                tmpres = self.truncated_copydic(res, nc-pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1,nc-pre_nc),order='F')
                self.meta['data_sum'] += tmpres['data'].sum(axis=1,dtype=float)
                self.meta['ndata'] += nc - pre_nc
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle(savepath, tmpres)
                self.batch_id = self.batch_id + 1
                pre_nc = nc
        if self.meta['ndata'] > 0:
            self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
            self.meta['data_mean'] = self.meta['data_mean'].reshape((-1,1),order='F')
        else:
            self.meta['data_mean'] = 0
        del self.meta['data_sum']

        myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)
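
# --- Added configuration sketch (not part of the original example) ---
# generate_negative_data_from_image only reads a handful of fields from
# imgdata_info and savedata_info. A minimal, hypothetical configuration that
# satisfies those reads could look like the dictionary below; the paths and
# values are placeholders, not the author's settings.
neg_config_sketch = {
    'imgdata_info': {
        'imgdatapath': '/path/to/negative/images',  # folder scanned for *.png/*.jpg/*.pgm/*.jpeg
    },
    'savedata_info': {
        'savedir': '/path/to/output/batches',
        'savename': 'data_batch',
        'start_patch_id': 1,         # initial value of self.batch_id
        'neg_sample_num': 10,        # negative windows sampled per image
        'newdim': (112, 112, 3),     # size of each sampled window
        'max_batch_size': 4000,      # flush a batch once this many samples are buffered
        'indmap_para': {
            'filter_size': 32.0, 'stride': 12.0,
            'joint_filter_size': 32.0, 'joint_stride': 12.0,
        },
    },
}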
Example #46
0
def GetImagePath(imgdir, ep, fr):
    import iutils as iu
    imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
    return imgpath
Example #47
0
def ReadTestDataToHMLPEDic(imgdir, annot_dir, tech_report_path, save_path=None, iou_thresh=0.5):
    """
    This function uses the detection windows provided by the author.
    Erroneous detection windows are not included.
    For images without a detection, all the joint points will be zero.
    """
    import Stickmen
    import scipy.io as sio
    import PIL
    import iutils as iu
    from PIL import Image
    import matplotlib.pyplot as plt
    import iread.hmlpe as hp
    import iread.myio as mio
    import ipyml.geometry as igeo
    rp = sio.loadmat(tech_report_path)['techrep2010_buffy']
    d_annot = ExtractAnnotation(annot_dir)
    ndata = rp.shape[1]
    data = []
    tsize = 112 # the size of test image
    part_ind_dim = (8,8)
    joint_ind_dim = (8,8)
    filter_size = 32.0
    stride = 12.0
    indrate = 0.3
    joint_filter_size = 32.0
    joint_stride = 12.0
    njoints = 8
    nparts = 7
    ndet = ndata # One test point for one image
    newdim = (tsize,tsize,3)
    data = hp.HMLPE.prepare_savebuffer({'data':newdim,\
                                        'part_indmap': part_ind_dim,\
                                        'joint_indmap':joint_ind_dim },\
                                        ndet, nparts, njoints)
    # add Buffy specific field
    add_buffy_specific_field(data, ndet, imgdir)
    idx = 0
    f_calc_area = lambda rec: (rec[1][1]-rec[0][1]) * (rec[1][0]-rec[0][0]) if len(rec)==2 else 0
    f_mk_rec = lambda det: ((det[0],det[1]), (det[2], det[3]))
    for i in range(ndata):
        ep = rp[0,i][1][0][0]
        fr = rp[0,i][2][0][0]
        imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
        img = Image.open(imgpath)
        data['ep'][...,i] = ep
        data['fr'][...,i] = fr
        data['filenames'][i] = imgpath
        data['is_positive'][...,i] = True
        data['annotation'][...,i] = Stickmen.ReorderStickmen(d_annot[ep][fr])
        data['gt_det'][...,i] = np.asarray(Stickmen.EstDetFromStickmen( data['annotation'][...,i]))
        gt_rec = f_mk_rec(data['gt_det'][...,i]) #
        gt_area = f_calc_area(gt_rec) 
        if rp[0,i][0].size == 0: # No detections were found in this image
            # imgdata will also be all zero
            # oridet will be all zero
            # oribbox will be all zero
            # joints8 will be all zero
            # jointmasks will be all zero
            # indmap will be all zero
            # joint_indmap will be all zero
            # nothing needs to be done, since these are the default values
            pass
        else:
            m = -1
            for j in range(rp[0,i][0].size):
                det = rp[0,i][0][0,j]['det'][0] # det = (x,y,x1,y1)
                cur_rec = f_mk_rec(det)
                int_area = f_calc_area(igeo.RectIntersectRect(cur_rec, gt_rec))
                if int_area > (gt_area - int_area) * iou_thresh:
                    m = j
                    break
            if m != -1:
                oribbox = ExtendBndbox(det, img.size)
                arr_img = np.asarray(img)[oribbox[1]:oribbox[3]+1, \
                                          oribbox[0]:oribbox[2]+1,:]
                res_img = Image.fromarray(arr_img).resize((tsize,tsize))
                data['data'][...,i] = np.asarray(res_img)
                tmppoints = convert_AHEcoor_to_joints8(data['annotation'][...,i])
                data['joints8'][...,i] = TransformPoints(tmppoints, oribbox, np.asarray(res_img.size) -1)
                data['jointmasks'][...,i] = hp.HMLPE.makejointmask(newdim, data['joints8'][...,i])
                data['indmap'][...,i] = hp.HMLPE.create_part_indicatormap(data['joints8'][...,i], hp.part_idx, part_ind_dim, indrate, filter_size, stride)
                data['joint_indmap'][...,i] = hp.HMLPE.create_joint_indicatormap(data['joints8'][...,i], joint_ind_dim, joint_filter_size, joint_stride)
                data['oribbox'][...,i] = oribbox
    data['data'] = data['data'].reshape((-1,ndet),order='F')
    if save_path is not None:
        mio.pickle(save_path, data)
    return data
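
# --- Added sketch (not part of the original example) ---
# A detection is accepted when its intersection with the ground-truth box
# satisfies int_area > (gt_area - int_area) * iou_thresh. The snippet below
# reproduces that test with a plain rectangle-intersection helper instead of
# ipyml.geometry.RectIntersectRect, whose exact return format is assumed here.
def rect_intersection_area(a, b):
    """a, b: ((x0, y0), (x1, y1)) with x0 <= x1 and y0 <= y1."""
    w = min(a[1][0], b[1][0]) - max(a[0][0], b[0][0])
    h = min(a[1][1], b[1][1]) - max(a[0][1], b[0][1])
    return w * h if (w > 0 and h > 0) else 0

def accept_detection(det_rec, gt_rec, iou_thresh=0.5):
    gt_area = (gt_rec[1][0] - gt_rec[0][0]) * (gt_rec[1][1] - gt_rec[0][1])
    int_area = rect_intersection_area(det_rec, gt_rec)
    return int_area > (gt_area - int_area) * iou_thresh

# A detection covering about 60% of the ground-truth box passes the default threshold.
print accept_detection(((0, 0), (80, 100)), ((20, 0), (120, 100)))  # True: 6000 > 4000 * 0.5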
Example #48
0
    def estimate_pose_main_process(self, input_dic, output_dic):
        import imgproc
        load_next = True
        data, raw_img = input_dic['next_data'], input_dic['raw_img']
        ndata = data[0].shape[-1]
        # output_dic['num_cases'] += [data[0].shape[-1]]
        buf = np.require(np.zeros((data[0].shape[-1], input_dic['data_dim']),\
                                      dtype=np.single), \
                                requirements='C')
        data += [buf]
        start_t = time()
        self.libmodel.startFeatureWriter(data, input_dic['output_layer_idx'])
        if load_next:
            input_dic['next_data'], input_dic['raw_img'], input_dic['bnd'] = \
              self.get_hmlpe_posedata_from_camera(input_dic['camera'], self.test_data_provider)
        self.finish_batch()
        print '[%.6f seconds]' % (time() - start_t)
        if input_dic['target_type'] in input_dic['convert_dic']:
            output_dic['est'] = input_dic['convert_dic'][
                input_dic['target_type']](buf.T)
        else:
            output_dic['est'] = buf.T
        if not load_next:
            return
        sys.stdout.flush()
        tmp = input_dic['raw_img']
        input_dic['camera_im'].set_data(tmp)
        input_dic['camera_fig'].canvas.draw()
        s = int(np.sqrt(data[0].shape[0]))
        if input_dic['target_type'] == 'hmlpe_2d':
            img = Image.fromarray(
                np.require(input_dic['raw_img'], dtype=np.uint8))
            sx, sy = data[0].shape[1], data[0].shape[0]
            output_dic['est'] = output_dic['est'].reshape((2, -1, ndata),
                                                          order='F')
            njoints = output_dic['est'].shape[1]
            cur_bnd = input_dic['bnd']
            bnd_sx = np.tile(np.asarray([(v[2]+1.0) \
                                          for v in cur_bnd]).reshape((1,ndata)),\
                              (njoints, 1)).reshape((1,njoints,ndata),order='F')
            bnd_sy = np.tile(np.asarray([(v[3]+1.0) \
                                          for v in cur_bnd]).reshape((1,ndata)),\
                              (njoints, 1)).reshape((1,njoints,ndata),order='F')
            bnd_s = np.concatenate((bnd_sx, bnd_sy), axis=0)

            bnd_ax = np.tile(np.asarray([v[0]  \
                                         for v in cur_bnd]).reshape((1,ndata)),\
                              (njoints, 1)).reshape((1,njoints,ndata),order='F')
            bnd_ay = np.tile(np.asarray([v[1] \
                                          for v in cur_bnd]).reshape((1,ndata)),\
                              (njoints, 1)).reshape((1,njoints,ndata),order='F')
            bnd_a = np.concatenate((bnd_ax, bnd_ay), axis=0)
            output_dic['est'] = output_dic['est'] * bnd_s + bnd_a

            draw = ImageDraw.Draw(img)
            # draw bnd
            for b in cur_bnd:
                draw.rectangle((b[0], b[1], b[0] + b[2], b[1] + b[3]))
            # draw center rectangle
            hx, hy = img.size
            draw.rectangle((hx / 2 - hy / 2, 0, hx / 2 + hy / 2, hy),
                           outline=(255, 0, 0))
            self.draw2d_skelenton(output_dic['est'], hmlpe.part_idx, draw)
            input_dic['pose_ax'].set_data(np.asarray(img))
            input_dic['pose_fig'].canvas.draw()
        else:
            ## Plot pose
            input_dic['pose_ax'].cla()
            #input_dic['pose_ax'].view_init(azim=-92, elev=-46)
            vlim = 0.4
            input_dic['pose_ax'].set_xlim([-vlim, vlim])
            input_dic['pose_ax'].set_ylim([-vlim, vlim])
            input_dic['pose_ax'].set_zlim([-vlim, vlim])
            self.plot_skelenton(output_dic['est'], h36m.part_idx,
                                input_dic['pose_ax'])
            imgproc.turn_off_axis(input_dic['pose_ax'])
            input_dic['pose_fig'].canvas.draw()
        if 'outputdir' in input_dic:
            outputdir = input_dic['outputdir']
            savecnt = input_dic['savecnt']
            print outputdir
            for i in range(2):
                plt.figure(i)
                plt.savefig(
                    iu.fullfile(outputdir, 'fig_%02d_%06d.jpg' % (i, savecnt)))
            input_dic['savecnt'] = savecnt + 1
        return input_dic['camera_im'], input_dic['pose_ax']
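
# --- Added sketch (not part of the original example) ---
# For the 'hmlpe_2d' target type the code above rescales each estimated joint
# by the detection-window size (v[2] + 1, v[3] + 1) and shifts it by the window
# origin (v[0], v[1]), i.e. it appears to map window-normalised coordinates
# back into full-image coordinates. A minimal NumPy version of that broadcast,
# for a single hypothetical window, is shown here.
import numpy as np

est = np.asarray([[0.25, 0.50, 0.75],   # x coordinates, normalised to the window
                  [0.10, 0.50, 0.90]])  # y coordinates, normalised to the window
bnd = (40, 20, 200, 100)                # hypothetical window as (x, y, w, h)
scale = np.asarray([[bnd[2] + 1.0], [bnd[3] + 1.0]])
offset = np.asarray([[bnd[0]], [bnd[1]]])
print est * scale + offset              # joints in full-image coordinates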
Example #49
0
def ReadTestDataToCifarDic(imgdir,
                           annot_dir,
                           tech_report_path,
                           save_path=None):
    """
    This program uses the detection windows provided by the author.
    Image data are extracted from the extended detection windows.
    Return type: dictionary with the following keys
                    ep
                    fr
                    annotation
                    oridet
                    oribbox
                    joints8          ! (coordinates w.r.t. the bounding box)
                    data             !
                    indmap           ! (all zeros)
    This is the previous version; the latest version is
      ReadTestDataToHMLPEDic
    """
    import Stickmen
    import scipy.io as sio
    import PIL
    import iutils as iu
    from PIL import Image
    import matplotlib.pyplot as plt
    import iconvnet_datacvt as icvt
    rp = sio.loadmat(tech_report_path)['techrep2010_buffy']
    d_annot = ExtractAnnotation(annot_dir)
    ndata = rp.shape[1]
    data = []
    tsize = 112  # the size of test image
    indsize = 8
    data = dict()
    ndet = sum([(1 if rp[0, i][0].size == 0 else rp[0, i][0].size)
                for i in range(ndata)])
    data['ep'] = np.ndarray([ndet], dtype=np.int)
    data['fr'] = np.ndarray([ndet], dtype=np.int)
    data['annotation'] = np.ndarray([4, 6, ndet], dtype=np.float32)
    data['indmap'] = np.zeros([7, indsize, indsize, ndet],
                              dtype=np.bool,
                              order='F')
    data['indmap_para'] = (30, 12)
    data['oribbox'] = np.ndarray([4, ndet], dtype=np.float32)
    data['data'] = np.ndarray([tsize, tsize, 3, ndet],
                              order='F',
                              dtype=np.uint8)
    data['joints8'] = np.ndarray([8, 2, ndet], dtype=np.float32)
    data['oridet'] = np.ndarray([4, ndet], dtype=np.float32)
    data['imgdir'] = imgdir
    idx = 0
    for i in range(ndata):
        ep = rp[0, i][1][0][0]
        fr = rp[0, i][2][0][0]
        imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
        img = Image.open(imgpath)

        if rp[0, i][0].size == 0:  # No detection found by the detector
            data['ep'][..., idx] = ep
            data['fr'][..., idx] = fr
            data['annotation'][...,
                               idx] = Stickmen.ReorderStickmen(d_annot[ep][fr])
            data['oribbox'][..., idx] = np.zeros([4], dtype=np.float32)
            data['joints8'][..., idx] = np.ones([8, 2],
                                                dtype=np.float32) * tsize * 2
            data['data'][..., idx] = np.zeros([tsize, tsize, 3],
                                              dtype=np.uint8)
            data['oridet'][..., idx] = np.zeros([4], dtype=np.float32)
            idx = idx + 1
            continue
        for j in range(rp[0, i][0].size):
            det = rp[0, i][0][0, j]['det'][0]  # det = (x,y,x1,y1)
            data['oridet'][..., idx] = det
            det = ExtendBndbox(det, img.size)
            arr_img = np.asarray(img)[det[1]:det[3] + 1, det[0]:det[2] + 1, :]
            res_img = Image.fromarray(arr_img).resize((tsize, tsize))
            data['ep'][..., idx] = ep
            data['fr'][..., idx] = fr
            data['annotation'][...,
                               idx] = Stickmen.ReorderStickmen(d_annot[ep][fr])
            data['data'][..., idx] = np.asarray(res_img)
            tmppoints = icvt.convert_AHEcoor_to_joints8(
                data['annotation'][..., idx])

            data['joints8'][..., idx] = TransformPoints(
                tmppoints, det,
                np.asarray(res_img.size) - 1)
            data['oribbox'][..., idx] = det
            idx = idx + 1

    data['data'] = data['data'].reshape((-1, ndet), order='F')
    if save_path is not None:
        icvt.ut.pickle(save_path, data)
    return data
Example #50
0
    def generate_positive_data_from_mat(self, generate_type, matpath):
        """
        in each mat
                        mat['X'] is image data
                        mat['Y'] is npart x ndata array
        
        """
        mat = sio.loadmat(matpath)
        dim = mat['dim'][0]
        newdim = self.savedata_info['newdim']
        if newdim[0] > dim[0] or newdim[1] > dim[1]:
            raise HMLPEError('Invalid new size ')
        if self.meta['matdim'] is None:
            self.meta['matdim'] = dim  # record the dimension before sampling
        else:
            if np.any(self.meta['matdim'] != dim):
                raise HMLPEError(
                    'Inconsistent matdim: Previous dim is %s, current mat dim is %s'
                    % (str(self.meta['matdim']), str(dim)))
        ndata = (mat['X'].shape)[1]
        if generate_type == 'rt':
            sample_num = self.savedata_info['sample_num']
            totaldata = sample_num * ndata * 2
            do_mirror = True
        elif generate_type == 'ct':
            sample_num = 1
            totaldata = sample_num * ndata
            do_mirror = False
        else:
            raise HMLPEError('Unsupported generate_type: %s' % generate_type)
        if (dim[0] - newdim[0] + 1) * (dim[1] - newdim[1] + 1) < sample_num:
            raise HMLPEError(' Invalid sample_num')

        nparts = self.meta['nparts']
        self.meta['ndata'] += totaldata

        ### BEGIN COMMENT
        # njoints = self.meta['njoints']
        # if njoints == 8:
        #     dicjtname = 'joints8'
        # else:
        #     #raise HMLPEError('njoints = %d No supported yet' % (njoints))
        #     dicjtname = 'joints'
        # newdim = self.savedata_info['newdim']
        # filter_size = self.savedata_info['indmap_para']['filter_size']
        # stride =  self.savedata_info['indmap_para']['stride']
        # rate = self.savedata_info['indmap_para']['rate']
        # mdim = self.get_indmapdim(newdim, filter_size, stride)

        # if newdim[0] > dim[0] or newdim[1] > dim[1]:
        #     raise HMLPEError('Invalid new size ')
        # if (dim[0] - newdim[0] + 1) * (dim[1] - newdim[1] + 1) < sample_num:
        #     raise HMLPEError(' Invalid sample_num')
        # joint_filter_size = self.savedata_info['indmap_para']['joint_filter_size']
        # joint_stride = self.savedata_info['indmap_para']['joint_stride']
        # jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)

        ### END COMMENT
        fieldpool = self.get_fieldpool_for_positive_mat_data()
        fieldpool['mat'] = mat
        self.meta['ind_dim']['part_indmap'] = fieldpool['mdim']
        self.meta['ind_dim']['joint_indmap'] = fieldpool['jtmdim']
        res = {}
        per_size = min(totaldata, self.savedata_info['max_batch_size'])

        allX = mat['X'].reshape((dim[0], dim[1], dim[2], ndata), order='F')
        Y2dname = fieldpool['Y2dname']
        allY = mat[Y2dname].reshape((2, -1, ndata), order='F')
        newlen = iu.prod(newdim)
        # prepare data buffer
        res = self.prepare_savebuffer({'data':fieldpool['newdim'], 'part_indmap':fieldpool['mdim'], 'joint_indmap': fieldpool['jtmdim']},\
                                       per_size, self.meta['nparts'],\
                                        self.meta['njoints'])
        tmpres = dict()
        pre_nc = 0
        nc = 0
        res['is_positive'][:] = True
        for it in range(ndata):
            curX = allX[..., it]
            curY = allY[..., it].transpose()
            curfilename = str(
                mat['imagepathlist'][0,
                                     it][0]) if 'imagepathlist' in mat else ''
            mesh = self.create_augumentation_mesh(dim, newdim, generate_type)
            l = (np.random.permutation(range(len(mesh))))[:sample_num]
            fieldpool['matidx'] = it
            fieldpool['curfilename'] = curfilename
            for p in l:
                r, c = mesh[p]
                tmpX = curX
                tmpX = np.roll(tmpX, shift=-int(r), axis=0)
                tmpX = np.roll(tmpX, shift=-int(c), axis=1)
                tmpY = curY - 1 + np.asarray([-c, -r])
                fieldpool['r'] = r
                fieldpool['c'] = c
                ####
                fieldpool['curX'] = tmpX
                fieldpool['Y'] = tmpY

                # tmpX = tmpX[:newdim[0], :newdim[1],:]
                # res['data'][...,nc - pre_nc] = tmpX
                # res[dicjtname][..., nc - pre_nc] = tmpY
                # res['jointmasks'][...,nc - pre_nc] = self.makejointmask(newdim, tmpY)
                # res['filenames'][nc - pre_nc] = curfilename
                # res['oribbox'][...,nc-pre_nc] = mat['oribbox'][...,it]
                # res['indmap'][...,nc-pre_nc] = self.create_part_indicatormap(tmpY, self.meta['savedata_info']['part_idx'], mdim, rate, filter_size, stride)
                # res['joint_indmap'][...,nc-pre_nc] = self.create_joint_indicatormap(tmpY, jtmdim, joint_filter_size, joint_stride)
                # res['joint_sample_offset'][...,nc-pre_nc] = [c, r]
                # res['is_mirror'][...,nc-pre_nc] = False
                self.fill_in_positive_mat_data_to_dic(res, nc - pre_nc, \
                                                      fieldpool, False)
                nc = nc + 1
                if not do_mirror:
                    continue
                #flip image
                tmpX = tmpX[:, ::-1, :]
                tmpY = self.flip_joints(newdim, tmpY)
                fieldpool['curX'] = tmpX
                fieldpool['Y'] = tmpY
                self.fill_in_positive_mat_data_to_dic(res, nc - pre_nc, \
                                                      fieldpool, True)
                # res['data'][...,nc - pre_nc] = tmpX
                # res[dicjtname][...,nc -pre_nc] = tmpY
                # res['jointmasks'][...,nc - pre_nc] = self.makejointmask(newdim, tmpY)
                # res['filenames'][nc - pre_nc] = curfilename

                # res['oribbox'][...,nc-pre_nc] = mat['oribbox'][...,it]
                # res['indmap'][...,nc-pre_nc] = self.create_part_indicatormap(tmpY, part_idx, mdim, rate, filter_size, stride)
                # res['joint_indmap'][...,nc-pre_nc] = self.create_joint_indicatormap(tmpY, jtmdim, joint_filter_size, joint_stride)
                # res['joint_sample_offset'][...,nc-pre_nc] = [c, r]
                # res['is_mirror'][...,nc-pre_nc] = True
                nc = nc + 1
            t = 2 if do_mirror else 1
            if nc - pre_nc + t * sample_num > per_size or nc == totaldata:
                tmpres = self.truncated_copydic(res, nc - pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1, nc - pre_nc),
                                                        order='F')
                self.meta['data_sum'] = self.meta['data_sum'] + tmpres[
                    'data'].sum(axis=1, dtype=float)
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle(savepath, tmpres)
                self.batch_id = self.batch_id + 1
                pre_nc = nc
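
# --- Added sketch (not part of the original example) ---
# The augmentation above crops a window at offset (r, c) by rolling the image
# by (-r, -c); the commented-out line tmpX[:newdim[0], :newdim[1], :] shows the
# slice that is presumably taken later inside fill_in_positive_mat_data_to_dic.
# The roll-then-slice trick is equivalent to a direct crop as long as the
# window stays inside the image, as this small check illustrates.
import numpy as np

img = np.arange(6 * 8 * 3).reshape((6, 8, 3))
r, c = 2, 3
newdim = (3, 4, 3)
rolled = np.roll(np.roll(img, shift=-r, axis=0), shift=-c, axis=1)
assert np.array_equal(rolled[:newdim[0], :newdim[1], :],
                      img[r:r + newdim[0], c:c + newdim[1], :])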
Example #51
0
    def generate_positive_data_from_mat(self, generate_type, matpath):
        """
        in each mat
                        mat['X'] is image data
                        mat['Y'] is npart x ndata array
        
        """
        mat = sio.loadmat(matpath)
        dim = mat['dim'][0]
        newdim = self.savedata_info['newdim']
        if newdim[0] > dim[0] or newdim[1] > dim[1]:
            raise HMLPEError('Invalid new size ')
        if self.meta['matdim'] is None:
            self.meta['matdim'] = dim # record the dimension before sampling
        else:
            if np.any(self.meta['matdim'] != dim):
                raise HMLPEError('Inconsistent matdim: Previous dim is %s, current mat dim is %s' % (str(self.meta['matdim']), str(dim)))
        ndata = (mat['X'].shape)[1]
        if generate_type == 'rt':
            sample_num = self.savedata_info['sample_num']
            totaldata = sample_num * ndata * 2
            do_mirror = True
        elif generate_type == 'ct':
            sample_num = 1
            totaldata = sample_num * ndata
            do_mirror = False
        else:
            raise HMLPEError('Unsupported generate_type: %s' % generate_type)
        if (dim[0] - newdim[0] + 1) * (dim[1] - newdim[1] + 1) < sample_num:
            raise HMLPEError(' Invalid sample_num')
        
        nparts = self.meta['nparts']
        self.meta['ndata'] += totaldata

        ### BEGIN COMMENT
        # njoints = self.meta['njoints']
        # if njoints == 8:
        #     dicjtname = 'joints8'
        # else:
        #     #raise HMLPEError('njoints = %d No supported yet' % (njoints))
        #     dicjtname = 'joints'
        # newdim = self.savedata_info['newdim']
        # filter_size = self.savedata_info['indmap_para']['filter_size']
        # stride =  self.savedata_info['indmap_para']['stride']
        # rate = self.savedata_info['indmap_para']['rate']
        # mdim = self.get_indmapdim(newdim, filter_size, stride)
        

        # if newdim[0] > dim[0] or newdim[1] > dim[1]:
        #     raise HMLPEError('Invalid new size ')
        # if (dim[0] - newdim[0] + 1) * (dim[1] - newdim[1] + 1) < sample_num:
        #     raise HMLPEError(' Invalid sample_num')
        # joint_filter_size = self.savedata_info['indmap_para']['joint_filter_size']
        # joint_stride = self.savedata_info['indmap_para']['joint_stride']
        # jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        
        ### END COMMENT
        fieldpool = self.get_fieldpool_for_positive_mat_data()
        fieldpool['mat'] = mat
        self.meta['ind_dim']['part_indmap'] = fieldpool['mdim']
        self.meta['ind_dim']['joint_indmap'] = fieldpool['jtmdim']
        res = {}
        per_size = min(totaldata, self.savedata_info['max_batch_size'])
        
        allX = mat['X'].reshape( (dim[0], dim[1],dim[2], ndata), order='F')
        Y2dname = fieldpool['Y2dname']
        allY = mat[Y2dname].reshape( (2,-1, ndata), order='F') 
        newlen = iu.prod( newdim )
        # prepare data buffer
        res = self.prepare_savebuffer({'data':fieldpool['newdim'], 'part_indmap':fieldpool['mdim'], 'joint_indmap': fieldpool['jtmdim']},\
                                       per_size, self.meta['nparts'],\
                                        self.meta['njoints'])
        tmpres = dict()
        pre_nc = 0
        nc = 0
        res['is_positive'][:] = True
        for it in range(ndata):
            curX = allX[...,it]
            curY = allY[...,it].transpose()
            curfilename = str(mat['imagepathlist'][0,it][0]) if 'imagepathlist' in mat else ''
            mesh = self.create_augumentation_mesh(dim, newdim, generate_type)
            l = (np.random.permutation(range(len(mesh))))[:sample_num]
            fieldpool['matidx'] = it
            fieldpool['curfilename'] = curfilename
            for p in l:
                r,c = mesh[p]
                tmpX = curX
                tmpX = np.roll(tmpX, shift=-int(r), axis = 0)
                tmpX = np.roll(tmpX, shift=-int(c), axis = 1)
                tmpY = curY - 1 + np.asarray([-c,-r])
                fieldpool['r'] = r
                fieldpool['c'] = c
                ####
                fieldpool['curX'] = tmpX
                fieldpool['Y'] = tmpY
                
                # tmpX = tmpX[:newdim[0], :newdim[1],:]
                # res['data'][...,nc - pre_nc] = tmpX                
                # res[dicjtname][..., nc - pre_nc] = tmpY
                # res['jointmasks'][...,nc - pre_nc] = self.makejointmask(newdim, tmpY)
                # res['filenames'][nc - pre_nc] = curfilename
                # res['oribbox'][...,nc-pre_nc] = mat['oribbox'][...,it]
                # res['indmap'][...,nc-pre_nc] = self.create_part_indicatormap(tmpY, self.meta['savedata_info']['part_idx'], mdim, rate, filter_size, stride)
                # res['joint_indmap'][...,nc-pre_nc] = self.create_joint_indicatormap(tmpY, jtmdim, joint_filter_size, joint_stride)
                # res['joint_sample_offset'][...,nc-pre_nc] = [c, r]
                # res['is_mirror'][...,nc-pre_nc] = False
                self.fill_in_positive_mat_data_to_dic(res, nc - pre_nc, \
                                                      fieldpool, False)
                nc = nc + 1
                if not do_mirror:
                    continue
                #flip image
                tmpX = tmpX[:,::-1,:]
                tmpY = self.flip_joints(newdim, tmpY)
                fieldpool['curX'] = tmpX
                fieldpool['Y'] = tmpY
                self.fill_in_positive_mat_data_to_dic(res, nc - pre_nc, \
                                                      fieldpool, True)
                # res['data'][...,nc - pre_nc] = tmpX
                # res[dicjtname][...,nc -pre_nc] = tmpY
                # res['jointmasks'][...,nc - pre_nc] = self.makejointmask(newdim, tmpY)
                # res['filenames'][nc - pre_nc] = curfilename
                
                # res['oribbox'][...,nc-pre_nc] = mat['oribbox'][...,it]            
                # res['indmap'][...,nc-pre_nc] = self.create_part_indicatormap(tmpY, part_idx, mdim, rate, filter_size, stride)
                # res['joint_indmap'][...,nc-pre_nc] = self.create_joint_indicatormap(tmpY, jtmdim, joint_filter_size, joint_stride)
                # res['joint_sample_offset'][...,nc-pre_nc] = [c, r]
                # res['is_mirror'][...,nc-pre_nc] = True
                nc = nc + 1
            t = 2 if do_mirror else 1
            if nc - pre_nc + t * sample_num > per_size or nc == totaldata:
                tmpres = self.truncated_copydic(res, nc-pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1,nc-pre_nc),order='F')
                self.meta['data_sum'] = self.meta['data_sum'] + tmpres['data'].sum(axis=1,dtype=float)
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle( savepath, tmpres)       
                self.batch_id = self.batch_id + 1
                pre_nc = nc
Example #52
0
def ReadCropImageToHMLPEDic(dataset_dir, save_dir, istrain=False, isOC=True):
    """
    This function is used for generating testing data,
    because training and testing data have different formats for oribbox.

    For generating training samples, please use
    create_lsp_regression_data.m
    (dataset_dir, type=3, opt)
      opt.OC = ?
    and hmlpe.py
    """
    import iutils as iu
    import iread.hmlpe as hmlpe
    import iread.myio as mio
    import scipy.io as sio
    from PIL import Image
    ndata = 1000
    if istrain:
        s_idx = 0
    else:
        s_idx = 1000
    imgdir = iu.fullfile(dataset_dir, 'images-crop')
    if isOC:
        dmat = sio.loadmat(iu.fullfile(dataset_dir, 'jointsOC.mat'))
    else:
        dmat = sio.loadmat(iu.fullfile(dataset_dir, 'joints-crop.mat'))
    lsp_jt = dmat['joints']
    dimdic = {
        'data': (112, 112, 3),
        'part_indmap': (8, 8),
        'joint_indmap': (8, 8)
    }
    nparts = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, ndata, nparts, njoints)
    d['data'] = d['data'].reshape((-1, ndata), order='F')
    d['is_mirror'][:] = False
    d['is_positive'][:] = True
    for idx in range(s_idx, s_idx + ndata):
        imgpath = iu.fullfile(imgdir, 'im%04d.jpg' % (idx + 1))
        img = Image.open(imgpath)
        i = idx - s_idx
        orijoints8, isvisible = GetJoints8(lsp_jt[..., idx])
        bbox = GetUpperBodyBox(img.size)
        img_arr = np.asarray(img)[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
        s = np.asarray([(dimdic['data'][1] - 1.0) / (bbox[2] - bbox[0]),
                        (dimdic['data'][0] - 1.0) / (bbox[3] - bbox[1])
                        ]).reshape((1, 2))
        tjoints = (orijoints8 - bbox[0:2, :].reshape((1, 2))) * s
        masks = hmlpe.HMLPE.makejointmask(dimdic['data'], tjoints)
        d['data'][..., i] = np.asarray(
            Image.fromarray(img_arr).resize(
                (dimdic['data'][0], dimdic['data'][1]))).reshape(
                    (-1, 1), order='F').flatten()
        d['joints8'][..., i] = tjoints
        d['jointmasks'][..., i] = np.logical_and(masks, isvisible)
        d['filenames'][i] = imgpath
        d['oribbox'][..., i] = bbox.flatten()
        d['indmap'][..., i] = hmlpe.HMLPE.create_part_indicatormap(
            tjoints, hmlpe.part_idx, dimdic['part_indmap'], 0.3, 30.0, 12.0)
        d['joint_indmap'][..., i] = hmlpe.HMLPE.create_joint_indicatormap(
            tjoints, dimdic['joint_indmap'], 30.0, 12.0)
    mio.pickle(iu.fullfile(save_dir, 'data_batch_1'), d)
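
# --- Added sketch (not part of the original example) ---
# ReadCropImageToHMLPEDic stores each image as one Fortran-ordered column, so
# d['data'] ends up with shape (dim_data, ndata) as expected by the rest of the
# pipeline. Reshaping a column back with order='F' recovers the (h, w, 3)
# image; the tiny 4 x 4 example below is only meant to illustrate that round trip.
import numpy as np

img = np.arange(4 * 4 * 3, dtype=np.uint8).reshape((4, 4, 3))
col = img.reshape((-1, 1), order='F').flatten()   # what the loop stores per image
assert np.array_equal(col.reshape((4, 4, 3), order='F'), img)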
Example #53
0
def ReadDataToHMLPEDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
    Read all data in 'data_category' into an HMLPE dictionary.
    There is no need to generate training data here, since it can be generated in
    hmlpe.py
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print 'Warning: correctness for data category %s is not guaranteed' % data_category
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {
        'data': (112, 112, 3),
        'part_indmap': (8, 8),
        'joint_indmap': (8, 8)
    }
    nparts = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4, buf_size), dtype=np.int)
    d['coords'] = np.ndarray((2, 29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1), order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
            mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts,
                                                   njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][..., j] = examples[i]['coords']
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C') - 1  # to Python-style 0-indexing
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               dimdic['data']).reshape(
                                                   (8, 2), order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(
            sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape(
                (dsize), order='F')
        d['data'][..., j] = data_img
        d['indmap'][..., j] = hmlpe.HMLPE.create_part_indicatormap(
            d['joints8'][..., j], hmlpe.part_idx, dimdic['part_indmap'], 0.3,
            30.0, 12.0)
        d['joint_indmap'][..., j] = hmlpe.HMLPE.create_joint_indicatormap(
            d['joints8'][..., j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][...,
                        j] = hmlpe.HMLPE.makejointmask(dimdic['data'],
                                                       d['joints8'][..., j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
Example #54
0
def GetImagePath(imgdir, ep, fr):
    import iutils as iu
    imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
    return imgpath
Example #55
0
import sys
sys.path.append('/home/grads/sijinli2/Projects/DHMLPE/Python/src')
sys.path.append('/home/grads/sijinli2/I_ProgramFile/I_Python/Project/I_utils')
sys.path.append('/home/grads/sijinli2/I_ProgramFile/I_Python/Project')
import iutils as iu
import os
ppath = iu.getparentpath(os.path.realpath(__file__), 2)
# sys.path.append(ppath)
# sys.path.append(iu.fullfile(ppath, 'task'))
sys.path.append(iu.fullfile(ppath, 'src'))
from options import *
Example #56
0
def shuffle_data(source_dir, target_dir, max_per_file=4000):
    """
    This function will shuffle all the data in source_dir
    and save it to target_dir.
    """
    if source_dir == target_dir:
        raise HMLPEError('source dir can not be the same as target dir')
    import shutil
    import sys
    iu.ensure_dir(target_dir)
    shutil.copy(iu.fullfile(source_dir, 'batches.meta'), \
                iu.fullfile(target_dir, 'batches.meta'))
    meta = myio.unpickle(iu.fullfile(source_dir, 'batches.meta'))
    ndata = meta['ndata']
    nbatch = (ndata - 1) / max_per_file + 1
    nparts = meta['nparts']
    njoints = meta['njoints']
    newdim = meta['savedata_info']['newdim']
    filter_size = meta['savedata_info']['indmap_para']['filter_size']
    stride = meta['savedata_info']['indmap_para']['stride']
    joint_filter_size = meta['savedata_info']['indmap_para'][
        'joint_filter_size']
    joint_stride = meta['savedata_info']['indmap_para']['joint_stride']
    mdim = HMLPE.get_indmapdim(newdim, filter_size, stride)
    jtmdim = HMLPE.get_indmapdim(newdim, joint_filter_size, joint_stride)
    print('There are %d data points in total; %d batches are needed to hold them' %
          (ndata, nbatch))
    print 'Begin creating empty files'
    rest = ndata
    d = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, max_per_file, nparts, njoints)
    HMLPE.adjust_savebuffer_shape(d)
    for b in range(nbatch):
        cur_n = min(max_per_file, rest)
        if b != nbatch - 1:
            saved = d
        else:
            saved = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, cur_n, nparts, njoints)
            HMLPE.adjust_savebuffer_shape(saved)
        myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)), saved)
        rest = rest - cur_n
    print 'End creating'
    allbatchfn = iu.getfilelist(source_dir, 'data_batch_\d+')
    np.random.seed(7)
    perm = range(ndata)
    np.random.shuffle(perm)
    buf_cap = 12  # hold at most 12 batches in memory at a time
    nround = (nbatch - 1) / buf_cap + 1
    for rd in range(nround):
        print('Round %d of %d' % (rd, nround))
        buf = dict()
        offset = 0
        for fn in allbatchfn:
            print('Processing %s' % fn)
            d = myio.unpickle(iu.fullfile(source_dir, fn))
            cur_n = d['data'].shape[-1]
            for b in range(rd * buf_cap, min(nbatch, (rd + 1) * buf_cap)):
                sys.stdout.write('\rpadding %d of %d' % (b + 1, nbatch))
                sys.stdout.flush()
                sidx = b * max_per_file
                eidx = min(ndata, sidx + max_per_file)
                cur_idx_list = [
                    i for i in range(cur_n)
                    if perm[offset + i] >= sidx and perm[offset + i] < eidx
                ]
                if len(cur_idx_list) == 0:
                    continue
                if not b in buf:
                    dsave = myio.unpickle(
                        iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)))
                    buf[b] = dsave
                else:
                    dsave = buf[b]
                save_idx_list = [perm[x + offset] - sidx for x in cur_idx_list]
                HMLPE.selective_copydic(d, dsave, cur_idx_list, save_idx_list)
                # myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), dsave)
            print 'Finished %s' % fn
            offset = offset + cur_n
        for b in range(rd * buf_cap, min(nbatch, (rd + 1) * buf_cap)):
            myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)),
                        buf[b])
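
# --- Added sketch (not part of the original example) ---
# shuffle_data sends source item number (offset + i) to target batch
# perm[offset + i] // max_per_file, at slot perm[offset + i] - sidx inside that
# batch. The toy loop below mimics that index bookkeeping for 10 items and
# batches of 4, without any file I/O.
import numpy as np

ndata, max_per_file = 10, 4
np.random.seed(7)
perm = np.random.permutation(ndata)
placement = {}   # global source index -> (1-based batch id, slot inside batch)
for gidx in range(ndata):
    b = perm[gidx] // max_per_file
    placement[gidx] = (b + 1, perm[gidx] - b * max_per_file)
print placement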
Example #57
0
def ReadTestDataToHMLPEDic(imgdir,
                           annot_dir,
                           tech_report_path,
                           save_path=None,
                           iou_thresh=0.5):
    """
    This function uses the detection windows provided by the author.
    Erroneous detection windows are not included.
    For images without a detection, all the joint points will be zero.
    """
    import Stickmen
    import scipy.io as sio
    import PIL
    import iutils as iu
    from PIL import Image
    import matplotlib.pyplot as plt
    import iread.hmlpe as hp
    import iread.myio as mio
    import ipyml.geometry as igeo
    rp = sio.loadmat(tech_report_path)['techrep2010_buffy']
    d_annot = ExtractAnnotation(annot_dir)
    ndata = rp.shape[1]
    data = []
    tsize = 112  # the size of test image
    part_ind_dim = (8, 8)
    joint_ind_dim = (8, 8)
    filter_size = 32.0
    stride = 12.0
    indrate = 0.3
    joint_filter_size = 32.0
    joint_stride = 12.0
    njoints = 8
    nparts = 7
    ndet = ndata  # One test point for one image
    newdim = (tsize, tsize, 3)
    data = hp.HMLPE.prepare_savebuffer({'data':newdim,\
                                        'part_indmap': part_ind_dim,\
                                        'joint_indmap':joint_ind_dim },\
                                        ndet, nparts, njoints)
    # add Buffy specific field
    add_buffy_specific_field(data, ndet, imgdir)
    idx = 0
    f_calc_area = lambda rec: (rec[1][1] - rec[0][1]) * (rec[1][0] - rec[0][
        0]) if len(rec) == 2 else 0
    f_mk_rec = lambda det: ((det[0], det[1]), (det[2], det[3]))
    for i in range(ndata):
        ep = rp[0, i][1][0][0]
        fr = rp[0, i][2][0][0]
        imgpath = iu.fullfile(imgdir, 'buffy_s5e'+str(ep)+'_original', \
                               ('%06d' % fr) + '.jpg')
        img = Image.open(imgpath)
        data['ep'][..., i] = ep
        data['fr'][..., i] = fr
        data['filenames'][i] = imgpath
        data['is_positive'][..., i] = True
        data['annotation'][..., i] = Stickmen.ReorderStickmen(d_annot[ep][fr])
        data['gt_det'][..., i] = np.asarray(
            Stickmen.EstDetFromStickmen(data['annotation'][..., i]))
        gt_rec = f_mk_rec(data['gt_det'][..., i])  #
        gt_area = f_calc_area(gt_rec)
        if rp[0, i][0].size == 0:  # No detections were found in this image
            # imgdata will also be all zero
            # oridet will be all zero
            # oribbox will be all zero
            # joints8 will be all zero
            # jointmasks will be all zero
            # indmap will be all zero
            # joint_indmap will be all zero
            # nothing needs to be done, since these are the default values
            pass
        else:
            m = -1
            for j in range(rp[0, i][0].size):
                det = rp[0, i][0][0, j]['det'][0]  # det = (x,y,x1,y1)
                cur_rec = f_mk_rec(det)
                int_area = f_calc_area(igeo.RectIntersectRect(cur_rec, gt_rec))
                if int_area > (gt_area - int_area) * iou_thresh:
                    m = j
                    break
            if m != -1:
                oribbox = ExtendBndbox(det, img.size)
                arr_img = np.asarray(img)[oribbox[1]:oribbox[3]+1, \
                                          oribbox[0]:oribbox[2]+1,:]
                res_img = Image.fromarray(arr_img).resize((tsize, tsize))
                data['data'][..., i] = np.asarray(res_img)
                tmppoints = convert_AHEcoor_to_joints8(data['annotation'][...,
                                                                          i])
                data['joints8'][..., i] = TransformPoints(
                    tmppoints, oribbox,
                    np.asarray(res_img.size) - 1)
                data['jointmasks'][..., i] = hp.HMLPE.makejointmask(
                    newdim, data['joints8'][..., i])
                data['indmap'][..., i] = hp.HMLPE.create_part_indicatormap(
                    data['joints8'][..., i], hp.part_idx, part_ind_dim,
                    indrate, filter_size, stride)
                data['joint_indmap'][...,
                                     i] = hp.HMLPE.create_joint_indicatormap(
                                         data['joints8'][...,
                                                         i], joint_ind_dim,
                                         joint_filter_size, joint_stride)
                data['oribbox'][..., i] = oribbox
    data['data'] = data['data'].reshape((-1, ndet), order='F')
    if save_path is not None:
        mio.pickle(save_path, data)
    return data