Example #1
def pack_01():
    """
    input:   fc_j0 feature,  rel_pose
    outputs: rel_pose, fc_j0_feature
    """
    source_feature_network_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/2015_02_02_acm_act_14_exp_2_19_graph_0012/'
    source_meta_path = '/opt/visal/tmp/for_sijin/tmp/tmp_saved'
    
    exp_meta_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta'
    save_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14'
    feature_name = 'Relative_Y3d_mono_body'
    res = dict()
    exp_meta = mio.unpickle(exp_meta_path)
    source_meta = dutils.collect_feature_meta(source_meta_path)
    rel_pose = exp_meta[feature_name]
    fc_j0_feature = source_meta['feature_list'][1]
    rel_gt = source_meta['feature_list'][0]

    diff = rel_gt.reshape((-1, rel_gt.shape[-1]),order='F') * 1200 - rel_pose
    print 'diff is {}'.format(diff.flatten().sum())
    feature_list =  [rel_pose, fc_j0_feature]
    feature_dim = [rel_pose.shape[0], fc_j0_feature.shape[0]]
    print feature_dim, '<<<feature dim'
    res = {'feature_list': feature_list, 'feature_dim':feature_dim,
           'info':{'indexes':source_meta['info']['indexes'],
                   'max_depth': 1200.0}}
    indexes = res['info']['indexes']
    res['info']['soure_feature_network_path'] = source_feature_network_path
    print indexes[:10], min(indexes), max(indexes)
    print 'The number of data is {} == {}'.format(indexes.size, feature_list[0].shape[-1])
    iu.ensure_dir(save_path)
    mio.pickle(iu.fullfile(save_path, 'batches.meta'), res)
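All of these examples lean on the same small set of helpers: iu.ensure_dir, iu.fullfile, and the mio/myio pickle functions. Their real implementations are not part of this listing; the minimal sketch below only reflects what the call sites assume (directory creation, path joining, pickling) and is not the project's actual iutils/myio source.

import os
import cPickle

def ensure_dir(path):
    # Create `path` (including parents) if it does not already exist.
    if not os.path.exists(path):
        os.makedirs(path)

def fullfile(*parts):
    # Join path components, in the spirit of os.path.join.
    return os.path.join(*parts)

def pickle(path, obj):
    # Dump `obj` to `path`.
    with open(path, 'wb') as f:
        cPickle.dump(obj, f, cPickle.HIGHEST_PROTOCOL)

def unpickle(path):
    # Load and return the object stored at `path`.
    with open(path, 'rb') as f:
        return cPickle.load(f)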
Example #2
File: cifar.py Project: itsuper7/convnet
def MakeDataFromImages(imgdir, max_per_batch , save_dir = None, save_name=None):
    import iutils as iu
    import iconvnet_datacvt as icvt
    from PIL import Image
    if max_per_batch == 0:
        raise CifarError("max_per_batch can't be zero")
    allfiles = iu.getfilelist(imgdir, '.*jpg|.*bmp|.*png$')
    ndata = len(allfiles)
    iu.ensure_dir(save_dir)
    d = PrepareData(min(max_per_batch, ndata))
    j = 0
    if save_name is None:
        save_name = 'data_batch'
    bid = 1
    for i,fn in enumerate(allfiles):
        if j == max_per_batch:
            j = 0
            if not save_dir is None:
                icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
                bid = bid + 1 
            if ndata - i < max_per_batch:
                d = PrepareData(ndata-i)
        fp = iu.fullfile(imgdir, fn)
        
        img = iu.imgproc.ensure_rgb(np.asarray(Image.open(fp)))
        img = Image.fromarray(img).resize((img_size[0],img_size[1]))
        arr_img = np.asarray(img).reshape((dim_data), order='F')
        d['data'][...,j] = arr_img
        j = j + 1
    if not save_dir is None:
        icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)         
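A hypothetical invocation of the function above (the paths are placeholders, and it assumes the module-level img_size and dim_data used inside MakeDataFromImages are already configured):

# Hypothetical call -- the directories are illustrative placeholders.
MakeDataFromImages('/data/images', max_per_batch=1024,
                   save_dir='/data/cifar_batches', save_name='data_batch')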
Example #3
def MakeDataFromImages(imgdir, max_per_batch, save_dir=None, save_name=None):
    import iutils as iu
    import iconvnet_datacvt as icvt
    from PIL import Image
    if max_per_batch == 0:
        raise CifarError("max_per_batch can't be zero")
    allfiles = iu.getfilelist(imgdir, '.*jpg|.*bmp|.*png$')
    ndata = len(allfiles)
    iu.ensure_dir(save_dir)
    d = PrepareData(min(max_per_batch, ndata))
    j = 0
    if save_name is None:
        save_name = 'data_batch'
    bid = 1
    for i, fn in enumerate(allfiles):
        if j == max_per_batch:
            j = 0
            if not save_dir is None:
                icvt.ut.pickle(
                    iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
                bid = bid + 1
            if ndata - i < max_per_batch:
                d = PrepareData(ndata - i)
        fp = iu.fullfile(imgdir, fn)

        img = iu.imgproc.ensure_rgb(np.asarray(Image.open(fp)))
        img = Image.fromarray(img).resize((img_size[0], img_size[1]))
        arr_img = np.asarray(img).reshape((dim_data), order='F')
        d['data'][..., j] = arr_img
        j = j + 1
    if not save_dir is None:
        icvt.ut.pickle(iu.fullfile(save_dir, save_name + '_' + str(bid)), d)
Example #4
def add_part_indicatormap(data_dir, save_dir, mdim, rate, filter_size, stride):
    """
    This function is used for generating part indicator map for old data
    data_dir is the directory that you put all batch_datayes
    """
    allfile = iu.getfilelist(data_dir, 'data_batch_\d+')
    meta_path = iu.fullfile(data_dir, 'batches.meta')
    iu.ensure_dir(save_dir)
    if iu.exists(meta_path, 'file'):
        d_meta = myio.unpickle(meta_path)
        if 'savedata_info' not in d_meta:
            d_meta['savedata_info'] = dict()
            d_meta['savedata_info']['indmap_para'] = dict()
        d_meta['savedata_info']['indmap_para']['filter_size'] = filter_size
        d_meta['savedata_info']['indmap_para']['stride'] = stride
        d_meta['savedata_info']['indmap_para']['rate'] = rate
        myio.pickle(iu.fullfile(save_dir, 'batches.meta'), d_meta)
    for fn in allfile:
        print 'Processing %s' % fn
        d = myio.unpickle(iu.fullfile(data_dir, fn))
        ndata = d['data'].shape[-1]
        nparts = 7
        d['indmap'] = np.zeros((nparts, mdim[0], mdim[1], ndata),
                               dtype=np.bool)
        for i in range(ndata):
            jts = d['joints8'][..., i]
            d['indmap'][..., i] = HMLPE.create_part_indicatormap(
                jts, part_idx, mdim, rate, filter_size, stride)
        myio.pickle(iu.fullfile(save_dir, fn), d)
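As a usage sketch, the values below mirror the constants that appear elsewhere in this listing (an 8x8 part indicator map with rate 0.3, filter size 30.0 and stride 12.0); the directories are placeholders:

# Illustrative call only; the directories are placeholders.
add_part_indicatormap(data_dir='/data/hmlpe_batches_old',
                      save_dir='/data/hmlpe_batches_with_indmap',
                      mdim=(8, 8), rate=0.3, filter_size=30.0, stride=12.0)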
Example #5
 def generate_positive_data(self, generate_type, allfile=None):
     """
     generate_type = 'rt': random translation
                     'ct'  center block
     """
     if allfile is None:
         allfile = iu.getfilelist(self.imgdata_info['imgdatapath'],
                                  '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (
         self.imgdata_info['imgdatapath'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     self.init_meta(generate_type)
     print self.meta
     np.random.seed(7)
     for fn in allfile:
         print 'Processing %s ' % fn
         mpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
         self.generate_positive_data_from_mat(generate_type,
                                              iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
         self.meta['data_mean'] = self.meta['data_mean'].reshape((-1, 1))
     else:
         self.meta['data_mean'] = 0
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'),
                 self.meta)
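The method assumes self.imgdata_info and self.savedata_info are already filled in. A minimal configuration sketch, using only keys that the examples in this listing actually read; the object construction and all values are hypothetical placeholders:

# Hypothetical setup -- assumes an HMLPE-like object can be built this way.
hm = HMLPE()
hm.imgdata_info = {'imgdatapath': '/data/hmlpe/mat'}
hm.savedata_info = {'savedir': '/data/hmlpe/batches',
                    'savename': 'data_batch',
                    'start_patch_id': 1,
                    'newdim': (112, 112, 3),
                    'max_batch_size': 4000,
                    'indmap_para': {'filter_size': 30.0, 'stride': 12.0,
                                    'rate': 0.3,
                                    'joint_filter_size': 30.0,
                                    'joint_stride': 12.0}}
hm.generate_positive_data('rt')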
Example #6
File: hmlpe.py Project: itsuper7/convnet
def merge_batch_data(data_dir_list, save_dir, is_symbolic = True, batch_start_num = 1):
    """
    This function will merge all the data_batches in data_dir into one folder
     and rename them accordining.
       Of cause, meta data will be updated 
    """
    import os
    import shutil
    iu.ensure_dir(save_dir)
    meta = None
    for ddir in data_dir_list:
        cur_meta = myio.unpickle(iu.fullfile(ddir, 'batches.meta'))    
        meta = HMLPE.merge_meta(meta, cur_meta)

    myio.pickle(iu.fullfile(save_dir, 'batches.meta'), meta)
    cur_id = batch_start_num
    for ddir in data_dir_list:
        all_file = iu.getfilelist(ddir, 'data_batch_\d+')
        print 'I find %d batches in %s' % (len(all_file), ddir)
        if is_symbolic:
            for fn in all_file:
                sn = iu.fullfile(save_dir, 'data_batch_%d' %  cur_id)
                if iu.exists(sn, 'file'):
                    os.remove(sn)
                os.symlink(iu.fullfile(ddir, fn), sn)
                cur_id = cur_id + 1
        else:
            for fn in all_file:
                shutil.copyfile(iu.fullfile(ddir, fn), iu.fullfile(save_dir, 'data_batch_%d' %  cur_id))
                cur_id = cur_id + 1
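An illustrative call, merging two batch folders into one via symlinks (all directories are placeholders):

# Illustrative call; the directories are placeholders.
merge_batch_data(['/data/hmlpe/batches_part1', '/data/hmlpe/batches_part2'],
                 '/data/hmlpe/batches_merged',
                 is_symbolic=True, batch_start_num=1)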
Example #7
File: hmlpe.py Project: itsuper7/convnet
def add_part_indicatormap(data_dir, save_dir, mdim, rate, filter_size, stride):
    """
    This function is used for generating part indicator map for old data
    data_dir is the directory that you put all batch_datayes
    """
    allfile = iu.getfilelist(data_dir, 'data_batch_\d+')
    meta_path = iu.fullfile(data_dir, 'batches.meta')
    iu.ensure_dir(save_dir)
    if iu.exists(meta_path, 'file'): 
        d_meta = myio.unpickle(meta_path)
        if 'savedata_info' not in d_meta:
            d_meta['savedata_info'] = dict()
            d_meta['savedata_info']['indmap_para'] = dict()
        d_meta['savedata_info']['indmap_para']['filter_size'] = filter_size
        d_meta['savedata_info']['indmap_para']['stride'] = stride
        d_meta['savedata_info']['indmap_para']['rate'] = rate 
        myio.pickle(iu.fullfile(save_dir, 'batches.meta'), d_meta)        
    for fn in allfile:
        print 'Processing %s' % fn
        d = myio.unpickle(iu.fullfile(data_dir, fn))
        ndata = d['data'].shape[-1]
        nparts = 7
        d['indmap'] = np.zeros((nparts, mdim[0], mdim[1], ndata), dtype=np.bool) 
        for i in range(ndata):
            jts = d['joints8'][...,i]
            d['indmap'][...,i] = HMLPE.create_part_indicatormap(jts, part_idx,  mdim, rate, filter_size, stride)
        myio.pickle(iu.fullfile(save_dir, fn), d)
Example #8
def merge_batch_data(data_dir_list,
                     save_dir,
                     is_symbolic=True,
                     batch_start_num=1):
    """
    This function will merge all the data_batches in data_dir into one folder
     and rename them accordining.
       Of cause, meta data will be updated 
    """
    import os
    import shutil
    iu.ensure_dir(save_dir)
    meta = None
    for ddir in data_dir_list:
        cur_meta = myio.unpickle(iu.fullfile(ddir, 'batches.meta'))
        meta = HMLPE.merge_meta(meta, cur_meta)

    myio.pickle(iu.fullfile(save_dir, 'batches.meta'), meta)
    cur_id = batch_start_num
    for ddir in data_dir_list:
        all_file = iu.getfilelist(ddir, 'data_batch_\d+')
        print 'I find %d batches in %s' % (len(all_file), ddir)
        if is_symbolic:
            for fn in all_file:
                sn = iu.fullfile(save_dir, 'data_batch_%d' % cur_id)
                if iu.exists(sn, 'file'):
                    os.remove(sn)
                os.symlink(iu.fullfile(ddir, fn), sn)
                cur_id = cur_id + 1
        else:
            for fn in all_file:
                shutil.copyfile(
                    iu.fullfile(ddir, fn),
                    iu.fullfile(save_dir, 'data_batch_%d' % cur_id))
                cur_id = cur_id + 1
Example #9
def ReadDataToCifarDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print "I haven't implemented the joints8 part"
        #raise ModecError("I haven't implemented the joints8 part")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4, s_first), dtype=np.int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2, 29, s_first), dtype=np.float32)
    tdsize = cifar.img_size[0]  # make sure img_size[0] == img_size[1]

    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata - i)
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][..., j] = tbox
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        d['coords'][..., j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C')
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               cifar.img_size).reshape(
                                                   (16), order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][..., j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), d)
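A hypothetical call; only data_category='istest' avoids the warning above, and all paths are placeholders:

# Hypothetical call; the paths are placeholders.
ReadDataToCifarDic(imgdir='/data/modec/images',
                   example_path='/data/modec/examples.mat',
                   data_category='istest',
                   max_per_batch=1024,
                   save_dir='/data/modec/cifar_batches')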
Example #10
 def estimate_pose(self):
     import scipy.io as sio
     from mpl_toolkits.mplot3d import Axes3D
     self.crop_cls = None
     if self.op.get_value('crop_image'):
         if self.op.get_value('crop_image') in self.cropper_dict:
             self.crop_cls = self.cropper_dict[self.op.get_value(
                 'crop_image')]()
     params = self.parse_params(self.op.get_value('do_pose_evaluation'))
     input_params = self.parse_params(self.op.get_value('inputstream'))
     input_type = str(input_params[0])
     if input_type == 'imgcamera':
         ca = ImageCamera(str(input_params[1]))
     else:
         ca = CVCamera()
     output_layer_idx = self.get_layer_idx(params[0])
     if len(params) == 1:
         target_type = 'h36m_body'
         gt_idx = 1
     else:
         target_type = params[1]
         gt_idx = int(params[2])
     data_dim = self.model_state['layers'][output_layer_idx]['outputs']
     if 'feature_name_3d' not in dir(self.test_data_provider):
         is_relskel = False
     else:
         is_relskel = (self.test_data_provider.feature_name_3d ==
                       'RelativeSkel_Y3d_mono_body')
     print 'I am using %s' % ('RelSkel' if is_relskel else 'Rel')
     convert_dic = {'h36m_body':self.convert_relskel2rel, \
                    'humaneva_body':self.convert_relskel2rel_eva}
     input_dic = {'data_dim':data_dim, 'target_type':target_type, \
                  'output_layer_idx':output_layer_idx}
     output_dic = {'est': None}
     input_dic['convert_dic'] = convert_dic
     input_dic['camera'] = ca
     input_dic['next_data'], input_dic['raw_img'], input_dic['bnd'] = \
       self.get_hmlpe_posedata_from_camera(input_dic['camera'], self.test_data_provider)
     input_dic['camera_fig'] = plt.figure(0)
     input_dic['camera_im'] = plt.imshow(input_dic['raw_img'])
     input_dic['pose_fig'] = plt.figure(1)
     if target_type == 'hmlpe_2d':
         input_dic['pose_ax'] = plt.imshow(input_dic['raw_img'])
     else:
         input_dic['pose_ax'] = input_dic['pose_fig'].add_subplot(
             111, projection='3d')
         input_dic['pose_ax'].plot(range(10), range(10), range(10))
         input_dic['pose_ax'].view_init(azim=-94, elev=-71)
     if self.op.get_value('outputdir'):
         input_dic['outputdir'] = self.op.get_value('outputdir')
         input_dic['savecnt'] = 0
         iu.ensure_dir(input_dic['outputdir'])
     ani_func = lambda *x: self.estimate_pose_main_process(
         input_dic, output_dic)
     dummy = animation.FuncAnimation(input_dic['camera_fig'], ani_func, \
                             interval=5, blit=True, repeat=False)
     plt.show()
Example #11
File: modec.py Project: itsuper7/convnet
def ReadDataToHMLPEDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
    Read all data in 'data_category'
    into HMLPE dictionary
    There is no need to generating training data, since they can be generated in
    hmlpe.py 
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print 'Warn: The correctness of data type %s is not guaranteed' % data_category
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {'data':(112,112,3), 'part_indmap':(8,8), 'joint_indmap':(8,8)} 
    nparts  = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4,buf_size), dtype=np.int)
    d['coords'] = np.ndarray((2,29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1),order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
           mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
           bid = bid + 1
           if ndata - i < max_per_batch:
               d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][...,j] = examples[i]['coords']
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox, img.size) 
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C') - 1 # convert to Python-style 0-based indexing
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox, dimdic['data']).reshape((8,2),order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape((dsize),order='F') 
        d['data'][...,j] = data_img
        d['indmap'][...,j] = hmlpe.HMLPE.create_part_indicatormap(d['joints8'][...,j], hmlpe.part_idx, dimdic['part_indmap'], 0.3, 30.0,  12.0)
        d['joint_indmap'][...,j] = hmlpe.HMLPE.create_joint_indicatormap(d['joints8'][...,j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][...,j] = hmlpe.HMLPE.makejointmask(dimdic['data'], d['joints8'][...,j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
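The (8, 8) indicator-map grids above, combined with the 112x112 crop, the 30.0 filter size and the 12.0 stride, are consistent with the usual sliding-window count 1 + ceil((image_dim - filter_size) / stride). The check below is purely illustrative and only an assumption about how get_indmapdim / create_*_indicatormap size their grids, not their actual source:

import math

def indmap_side(image_dim, filter_size, stride):
    # Assumed sliding-window formula; matches the constants used above.
    return 1 + int(math.ceil((image_dim - filter_size) / float(stride)))

print indmap_side(112, 30.0, 12.0)  # -> 8, matching the (8, 8) maps above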
Example #12
def process():
    train_dp, test_dp = create_dp2()
    layers, eval_net, train_net = create_layers2()
    solver_params = {'num_epoch':200, 'save_path':'/opt/visal/tmp/for_sijin/tmp/itheano_test_act14',
                     'testing_freq':1, 'K_candidate':2000, 'max_num':10
    }
    iu.ensure_dir(solver_params['save_path'])
    solvers = MMLSSolver([eval_net, train_net], train_dp, test_dp, solver_params)
    GraphParser.print_graph_connections(layers)
    solvers.train()
Example #13
File: modec.py Project: itsuper7/convnet
def ReadDataToCifarDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print "I haven't implemented the joints8 part"
        #raise ModecError("I haven't implemented the joints8 part")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4,s_first),dtype=np.int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2,29,s_first),dtype=np.float32)
    tdsize= cifar.img_size[0] # make sure img_size[0] == img_size[1]
    
    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata-i)                
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][...,j] = tbox
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox,img.size)
        d['coords'][...,j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C')
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox,cifar.img_size).reshape((16),order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][...,j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)),d)
Example #14
 def estimate_pose(self):
     import scipy.io as sio
     from mpl_toolkits.mplot3d import Axes3D
     self.crop_cls = None
     if self.op.get_value('crop_image'):
         if self.op.get_value('crop_image') in self.cropper_dict:
             self.crop_cls = self.cropper_dict[self.op.get_value('crop_image')]()
     params = self.parse_params(self.op.get_value('do_pose_evaluation'))
     input_params = self.parse_params(self.op.get_value('inputstream'))
     input_type = str(input_params[0])
     if input_type == 'imgcamera':
         ca = ImageCamera(str(input_params[1]))
     else:
         ca = CVCamera()
     output_layer_idx = self.get_layer_idx(params[0])
     if len(params) == 1:
         target_type = 'h36m_body'
         gt_idx = 1
     else:
         target_type = params[1]
         gt_idx = int(params[2])
     data_dim = self.model_state['layers'][output_layer_idx]['outputs']
     if 'feature_name_3d' not in dir(self.test_data_provider):
         is_relskel = False
     else:
         is_relskel = (self.test_data_provider.feature_name_3d == 'RelativeSkel_Y3d_mono_body')
     print 'I am using %s' % ('RelSkel' if is_relskel else 'Rel')
     convert_dic = {'h36m_body':self.convert_relskel2rel, \
                    'humaneva_body':self.convert_relskel2rel_eva}
     input_dic = {'data_dim':data_dim, 'target_type':target_type, \
                  'output_layer_idx':output_layer_idx}
     output_dic = {'est':None}
     input_dic['convert_dic'] = convert_dic
     input_dic['camera'] = ca
     input_dic['next_data'], input_dic['raw_img'], input_dic['bnd'] = \
       self.get_hmlpe_posedata_from_camera(input_dic['camera'], self.test_data_provider)
     input_dic['camera_fig'] = plt.figure(0)
     input_dic['camera_im'] = plt.imshow(input_dic['raw_img'])
     input_dic['pose_fig'] = plt.figure(1)
     if target_type == 'hmlpe_2d':
         input_dic['pose_ax'] = plt.imshow(input_dic['raw_img'])
     else:
         input_dic['pose_ax'] = input_dic['pose_fig'].add_subplot(111,projection='3d')
         input_dic['pose_ax'].plot(range(10),range(10),range(10))
         input_dic['pose_ax'].view_init(azim=-94, elev=-71)
     if self.op.get_value('outputdir'):
         input_dic['outputdir'] = self.op.get_value('outputdir')
         input_dic['savecnt'] = 0
         iu.ensure_dir(input_dic['outputdir'])
     ani_func = lambda *x: self.estimate_pose_main_process(input_dic, output_dic)
     dummy = animation.FuncAnimation(input_dic['camera_fig'], ani_func, \
                             interval=5, blit=True, repeat=False)
     plt.show()        
Example #15
def process():
    train_dp, test_dp = create_dp2()
    layers, eval_net, train_net = create_layers2()
    solver_params = {
        'num_epoch': 200,
        'save_path': '/opt/visal/tmp/for_sijin/tmp/itheano_test_act14',
        'testing_freq': 1,
        'K_candidate': 2000,
        'max_num': 10
    }
    iu.ensure_dir(solver_params['save_path'])
    solvers = MMLSSolver([eval_net, train_net], train_dp, test_dp,
                         solver_params)
    GraphParser.print_graph_connections(layers)
    solvers.train()
Example #16
def cvt1(source_exp_name, target_exp_name):
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(
        iu.fullfile(base_path, 'folder_%s' % source_exp_name, 'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name)
    target_meta_path = iu.fullfile(target_meta_folder, 'batches.meta')
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
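Given the docstring, the intended invocation is presumably along these lines:

# Presumed usage, matching the experiment names in the docstring.
cvt1('SP_t004_act_14', 'Raw_SP_t004_act_14')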
Example #17
def cvt1(source_exp_name, target_exp_name):
    print '''
    SP_t004_act_14:
    source meta [rel_gt,  img_feature_accv_fc_j0,  relskel_feature_t004]
    Raw_SP_t004_act_14:
    target meta [rel_gt,  img_feature_accv_fc_j0,  rel_gt]
    '''
    base_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/'
    source_meta = mio.unpickle(iu.fullfile(base_path, 'folder_%s' % source_exp_name,
                                           'batches.meta'))
    target_meta_folder = iu.fullfile(base_path, 'folder_%s' % target_exp_name) 
    target_meta_path =  iu.fullfile(target_meta_folder, 'batches.meta') 
    d = source_meta.copy()
    print d.keys()
    d['feature_list'] = [source_meta['feature_list'][k] for k in [0, 1, 0]]
    d['feature_dim'] = [source_meta['feature_dim'][k] for k in [0, 1, 0]]
    # print d['info']
    print 'folder :{}\n path {}'.format(target_meta_folder, target_meta_path)
    iu.ensure_dir(target_meta_folder)
    mio.pickle(target_meta_path, d)
Example #18
def pack_01():
    """
    input:   fc_j0 feature,  rel_pose
    outputs: rel_pose, fc_j0_feature
    """
    source_feature_network_path = '/opt/visal/tmp/for_sijin/Data/saved/theano_models/2015_02_02_acm_act_14_exp_2_19_graph_0012/'
    source_meta_path = '/opt/visal/tmp/for_sijin/tmp/tmp_saved'

    exp_meta_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_ASM_act_14_exp_2/batches.meta'
    save_path = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp/folder_FCJ0_act_14'
    feature_name = 'Relative_Y3d_mono_body'
    res = dict()
    exp_meta = mio.unpickle(exp_meta_path)
    source_meta = dutils.collect_feature_meta(source_meta_path)
    rel_pose = exp_meta[feature_name]
    fc_j0_feature = source_meta['feature_list'][1]
    rel_gt = source_meta['feature_list'][0]

    diff = rel_gt.reshape((-1, rel_gt.shape[-1]), order='F') * 1200 - rel_pose
    print 'diff is {}'.format(diff.flatten().sum())
    feature_list = [rel_pose, fc_j0_feature]
    feature_dim = [rel_pose.shape[0], fc_j0_feature.shape[0]]
    print feature_dim, '<<<feature dim'
    res = {
        'feature_list': feature_list,
        'feature_dim': feature_dim,
        'info': {
            'indexes': source_meta['info']['indexes'],
            'max_depth': 1200.0
        }
    }
    indexes = res['info']['indexes']
    res['info']['soure_feature_network_path'] = source_feature_network_path
    print indexes[:10], min(indexes), max(indexes)
    print 'The number of data is {} == {}'.format(indexes.size,
                                                  feature_list[0].shape[-1])
    iu.ensure_dir(save_path)
    mio.pickle(iu.fullfile(save_path, 'batches.meta'), res)
Example #19
 def generate_data(self, generate_type, allfile = None):
     """
     generate_type = 'rt' only
     """
     if allfile is None:
         allfile = iu.getfilelist( self.imgdata_info['imgdata_path'], '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (self.imgdata_info['imgdata_path'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     ndata = 0
     self.meta = {'imgdata_info':self.imgdata_info,'savedata_info':self.savedata_info}
     self.meta['num_vis'] = iu.prod(self.savedata_info['newdim'])
     self.meta['data_sum'] = 0
     self.meta['ndata'] = 0
     self.meta['nparts'] = len(part_idx) 
     for fn in allfile:
         if generate_type == 'rt':
             mpath = iu.fullfile(self.imgdata_info['imgdata_path'], fn)
             self.generate_rt_data(iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean']  = self.meta['data_sum'] / self.meta['ndata']
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)
Example #20
File: hmlpe.py Project: itsuper7/convnet
 def generate_positive_data(self, generate_type, allfile = None):
     """
     generate_type = 'rt': random translation
                     'ct'  center block
     """
     if allfile is None:
         allfile = iu.getfilelist( self.imgdata_info['imgdatapath'], '\w+\.mat')
     print 'imgdatapath=%s, %d files are found' % (self.imgdata_info['imgdatapath'], len(allfile))
     iu.ensure_dir(self.savedata_info['savedir'])
     self.batch_id = self.savedata_info['start_patch_id']
     self.init_meta(generate_type)
     print self.meta
     np.random.seed(7)
     for fn in allfile:
         print 'Processing %s ' % fn
         mpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
         self.generate_positive_data_from_mat(generate_type ,iu.fullfile(mpath))
     if self.meta['ndata'] > 0:
         self.meta['data_mean']  = self.meta['data_sum'] / self.meta['ndata']
         self.meta['data_mean'] = self.meta['data_mean'].reshape((-1,1))
     else:
         self.meta['data_mean'] = 0
     del self.meta['data_sum']
     myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)
Example #21
def shuffle_data(source_dir, target_dir, max_per_file=4000):
    """
    This function will shuflle all the data in source_dir
    and save it to target_dir
    """
    if source_dir == target_dir:
        raise HMLPEError('source dir can not be the same as target dir')
    import shutil
    import sys
    iu.ensure_dir(target_dir)
    shutil.copy(iu.fullfile(source_dir, 'batches.meta'), \
                iu.fullfile(target_dir, 'batches.meta'))
    meta = myio.unpickle(iu.fullfile(source_dir, 'batches.meta'))
    ndata = meta['ndata']
    nbatch = (ndata - 1) / max_per_file + 1
    nparts = meta['nparts']
    njoints = meta['njoints']
    newdim = meta['savedata_info']['newdim']
    filter_size = meta['savedata_info']['indmap_para']['filter_size']
    stride = meta['savedata_info']['indmap_para']['stride']
    joint_filter_size = meta['savedata_info']['indmap_para'][
        'joint_filter_size']
    joint_stride = meta['savedata_info']['indmap_para']['joint_stride']
    mdim = HMLPE.get_indmapdim(newdim, filter_size, stride)
    jtmdim = HMLPE.get_indmapdim(newdim, joint_filter_size, joint_stride)
    print('There are %d data in total, I need %d batch to hold it' %
          (ndata, nbatch))
    print 'Begin creating empty files'
    rest = ndata
    d = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, max_per_file, nparts, njoints)
    HMLPE.adjust_savebuffer_shape(d)
    for b in range(nbatch):
        cur_n = min(max_per_file, rest)
        if b != nbatch - 1:
            saved = d
        else:
            saved = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, cur_n, nparts, njoints)
            HMLPE.adjust_savebuffer_shape(saved)
        myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)), saved)
        rest = rest - cur_n
    print 'End creating'
    allbatchfn = iu.getfilelist(source_dir, 'data_batch_\d+')
    np.random.seed(7)
    perm = range(ndata)
    np.random.shuffle(perm)
    buf_cap = 12  # hold at most 12 batches in memory at a time
    nround = (nbatch - 1) / buf_cap + 1
    for rd in range(nround):
        print('Round %d of %d' % (rd, nround))
        buf = dict()
        offset = 0
        for fn in allbatchfn:
            print('Processing %s' % fn)
            d = myio.unpickle(iu.fullfile(source_dir, fn))
            cur_n = d['data'].shape[-1]
            for b in range(rd * buf_cap, min(nbatch, (rd + 1) * buf_cap)):
                sys.stdout.write('\rpadding %d of %d' % (b + 1, nbatch))
                sys.stdout.flush()
                sidx = b * max_per_file
                eidx = min(ndata, sidx + max_per_file)
                cur_idx_list = [
                    i for i in range(cur_n)
                    if perm[offset + i] >= sidx and perm[offset + i] < eidx
                ]
                if len(cur_idx_list) == 0:
                    continue
                if not b in buf:
                    dsave = myio.unpickle(
                        iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)))
                    buf[b] = dsave
                else:
                    dsave = buf[b]
                save_idx_list = [perm[x + offset] - sidx for x in cur_idx_list]
                HMLPE.selective_copydic(d, dsave, cur_idx_list, save_idx_list)
                # myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), dsave)
            print 'Finished %s' % fn
            offset = offset + cur_n
        for b in range(rd * buf_cap, min(nbatch, (rd + 1) * buf_cap)):
            myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)),
                        buf[b])
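An illustrative call (the source and target directories are placeholders; max_per_file keeps its default of 4000 items per batch):

# Illustrative call; the directories are placeholders.
shuffle_data('/data/hmlpe/batches', '/data/hmlpe/batches_shuffled',
             max_per_file=4000)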
Example #22
File: hmlpe.py Project: itsuper7/convnet
def shuffle_data(source_dir, target_dir, max_per_file = 4000):
    """
    This function will shuflle all the data in source_dir
    and save it to target_dir
    """
    if source_dir == target_dir:
        raise HMLPEError('source dir can not be the same as target dir')
    import shutil
    import sys
    iu.ensure_dir( target_dir)
    shutil.copy(iu.fullfile(source_dir, 'batches.meta'), \
                iu.fullfile(target_dir, 'batches.meta'))
    meta = myio.unpickle(iu.fullfile(source_dir, 'batches.meta'))
    ndata = meta['ndata']
    nbatch = (ndata  - 1) / max_per_file + 1
    nparts = meta['nparts']
    njoints = meta['njoints']
    newdim = meta['savedata_info']['newdim']
    filter_size = meta['savedata_info']['indmap_para']['filter_size']
    stride = meta['savedata_info']['indmap_para']['stride']
    joint_filter_size = meta['savedata_info']['indmap_para']['joint_filter_size']
    joint_stride = meta['savedata_info']['indmap_para']['joint_stride']
    mdim = HMLPE.get_indmapdim(newdim, filter_size, stride)
    jtmdim = HMLPE.get_indmapdim(newdim, joint_filter_size, joint_stride)
    print('There are %d data in total, I need %d batch to hold it' %(ndata, nbatch))
    print 'Begin creating empty files'
    rest = ndata
    d = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, max_per_file, nparts, njoints)
    HMLPE.adjust_savebuffer_shape(d)
    for b in range(nbatch):
        cur_n = min(max_per_file, rest)
        if b != nbatch - 1:
            saved = d
        else:
            saved = HMLPE.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, cur_n, nparts, njoints)
            HMLPE.adjust_savebuffer_shape(saved)
        myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b + 1)), saved)
        rest = rest - cur_n
    print 'End creating'
    allbatchfn = iu.getfilelist(source_dir, 'data_batch_\d+')
    np.random.seed(7)
    perm = range(ndata)
    np.random.shuffle(perm)
    buf_cap = 12 # hold at most 12 batches in memory at a time
    nround = (nbatch - 1)/buf_cap + 1
    for rd in range(nround):
        print ('Round %d of %d' % (rd,nround))
        buf = dict()
        offset = 0
        for fn in allbatchfn:
            print( 'Processing %s' % fn )
            d = myio.unpickle(iu.fullfile(source_dir, fn))
            cur_n = d['data'].shape[-1]
            for b in range(rd * buf_cap, min(nbatch, (rd+1)*buf_cap)):
                sys.stdout.write('\rpadding %d of %d' % (b + 1, nbatch))
                sys.stdout.flush() 
                sidx = b * max_per_file
                eidx = min(ndata, sidx + max_per_file)
                cur_idx_list = [i for i in range(cur_n) if perm[offset + i] >= sidx and perm[offset + i] < eidx]
                if len(cur_idx_list) == 0:
                    continue
                if not b in buf:
                    dsave = myio.unpickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)))
                    buf[b] = dsave
                else:
                    dsave = buf[b]
                save_idx_list = [perm[ x + offset] - sidx for x in cur_idx_list]
                HMLPE.selective_copydic(d, dsave, cur_idx_list, save_idx_list)
                # myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), dsave)
            print 'Finished %s' % fn
            offset = offset + cur_n
        for b in range(rd * buf_cap, min(nbatch, (rd+1)*buf_cap)):
            myio.pickle(iu.fullfile(target_dir, 'data_batch_%d' % (b+1)), buf[b])
Example #23
    def generate_negative_data_from_image(self, generate_type, allfile=None):
        """
        generate_type = 'neg_sample'
        savedata_info should have 'neg_sample_num':
                      indicating sampling how many negative window per image
        If some image is small, then it will try to generate as much as possible
                      
        """
        import Image
        if allfile is None:
            allfile = iu.getfilelist(self.imgdata_info['imgdatapath'], \
                                     '\w+(\.png|\.jpg|\.pgm|.jpeg)')
        print 'imgdatapath=%s, %d images are found' % (
            self.imgdata_info['imgdatapath'], len(allfile))
        iu.ensure_dir(self.savedata_info['savedir'])
        savedir = self.savedata_info['savedir']
        self.batch_id = self.savedata_info['start_patch_id']
        self.init_meta(generate_type)
        print(self.meta)
        sample_num = self.savedata_info['neg_sample_num']
        totaldata = len(allfile) * sample_num
        self.meta['ndata'] = 0
        newdim = self.savedata_info['newdim']
        nparts = self.meta['nparts']
        njoints = self.meta['njoints']
        if njoints == 8:
            dicjtname = 'joints8'
        else:
            dicjtname = 'joints'
            #raise HMLPEError('njoints = %d are not supported yet' % njoints)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride = self.savedata_info['indmap_para']['stride']
        #rate = self.savedata_info['indmap_para']['rate']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        self.meta['ind_dim']['part_indmap'] = mdim
        joint_filter_size = self.savedata_info['indmap_para'][
            'joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        self.meta['ind_dim']['joint_indmap'] = jtmdim
        per_size = min(totaldata, self.savedata_info['max_batch_size'])
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, per_size, nparts, njoints)
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        pre_nc = 0
        nc = 0
        np.random.seed(7)
        for it, fn in enumerate(allfile):
            print('Processing %s' % fn)
            curimgpath = iu.fullfile(self.imgdata_info['imgdatapath'], fn)
            img = np.asarray(Image.open(curimgpath), dtype=np.uint8)
            imgdim = img.shape
            if imgdim[0] < newdim[0] or imgdim[1] < newdim[1]:
                print('small image, ignored')
                continue
            mesh = self.create_augumentation_mesh(imgdim, newdim,
                                                  generate_type)
            ts = min(len(mesh), sample_num)
            l = (np.random.permutation(range(len(mesh))))[:ts]
            for p in l:
                r, c = mesh[p]
                timg = img[r:r + newdim[0], c:c + newdim[0], :]
                res['data'][..., nc - pre_nc] = timg
                res['joint_sample_offset'][..., nc - pre_nc] = [c, r]
                res['filenames'][nc - pre_nc] = curimgpath
                res['oribbox'][..., nc - pre_nc] = [
                    c, r, c + newdim[1] - 1, r + newdim[0] - 1
                ]
                nc = nc + 1
            if sample_num + nc - pre_nc > per_size or it == len(allfile) - 1:
                tmpres = self.truncated_copydic(res, nc - pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1, nc - pre_nc),
                                                        order='F')
                self.meta['data_sum'] += tmpres['data'].sum(axis=1,
                                                            dtype=float)
                self.meta['ndata'] += nc - pre_nc
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle(savepath, tmpres)
                self.batch_id = self.batch_id + 1
                pre_nc = nc
        if self.meta['ndata'] > 0:
            self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
            self.meta['data_mean'] = self.meta['data_mean'].reshape((-1, 1),
                                                                    order='F')
        else:
            self.meta['data_mean'] = 0
        del self.meta['data_sum']

        myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'),
                    self.meta)
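Relative to the positive-data configuration sketched at the end of Example #5, this method additionally reads savedata_info['neg_sample_num']; an equally hypothetical setup might look like:

# Hypothetical setup, reusing the placeholder `hm` object from Example #5.
hm.savedata_info['neg_sample_num'] = 10
hm.generate_negative_data_from_image('neg_sample')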
Example #24
def ReadDataToHMLPEDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
    Read all data in 'data_category'
    into HMLPE dictionary
    There is no need to generating training data, since they can be generated in
    hmlpe.py 
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print 'Warn: The correctness of data type %s is not guaranteed' % data_category
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {
        'data': (112, 112, 3),
        'part_indmap': (8, 8),
        'joint_indmap': (8, 8)
    }
    nparts = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4, buf_size), dtype=np.int)
    d['coords'] = np.ndarray((2, 29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1), order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
            mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
            bid = bid + 1
            if ndata - i < max_per_batch:
                d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts,
                                                   njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][..., j] = examples[i]['coords']
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C') - 1  # convert to Python-style 0-based indexing
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               dimdic['data']).reshape(
                                                   (8, 2), order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(
            sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape(
                (dsize), order='F')
        d['data'][..., j] = data_img
        d['indmap'][..., j] = hmlpe.HMLPE.create_part_indicatormap(
            d['joints8'][..., j], hmlpe.part_idx, dimdic['part_indmap'], 0.3,
            30.0, 12.0)
        d['joint_indmap'][..., j] = hmlpe.HMLPE.create_joint_indicatormap(
            d['joints8'][..., j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][...,
                        j] = hmlpe.HMLPE.makejointmask(dimdic['data'],
                                                       d['joints8'][..., j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
Example #25
File: hmlpe.py Project: itsuper7/convnet
    def generate_negative_data_from_image(self, generate_type, allfile=None):
        """
        generate_type = 'neg_sample'
        savedata_info should have 'neg_sample_num':
                      indicating sampling how many negative window per image
        If some image is small, then it will try to generate as much as possible
                      
        """
        import Image
        if allfile is None:
            allfile = iu.getfilelist(self.imgdata_info['imgdatapath'], \
                                     '\w+(\.png|\.jpg|\.pgm|.jpeg)')
        print 'imgdatapath=%s, %d images are found' % (self.imgdata_info['imgdatapath'], len(allfile))
        iu.ensure_dir(self.savedata_info['savedir'])
        savedir = self.savedata_info['savedir']
        self.batch_id = self.savedata_info['start_patch_id']
        self.init_meta(generate_type)
        print(self.meta)
        sample_num = self.savedata_info['neg_sample_num']
        totaldata = len(allfile) * sample_num
        self.meta['ndata'] = 0
        newdim = self.savedata_info['newdim']
        nparts = self.meta['nparts']
        njoints = self.meta['njoints']
        if njoints == 8:
            dicjtname = 'joints8'
        else:
            dicjtname = 'joints'
            #raise HMLPEError('njoints = %d are not supported yet' % njoints)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride =  self.savedata_info['indmap_para']['stride']
        #rate = self.savedata_info['indmap_para']['rate']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        self.meta['ind_dim']['part_indmap'] = mdim
        joint_filter_size = self.savedata_info['indmap_para']['joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        self.meta['ind_dim']['joint_indmap'] = jtmdim
        per_size = min(totaldata, self.savedata_info['max_batch_size'])
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, per_size, nparts, njoints)
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        pre_nc = 0
        nc = 0
        np.random.seed(7)
        for it, fn in enumerate(allfile):
            print('Processing %s' % fn)
            curimgpath= iu.fullfile(self.imgdata_info['imgdatapath'], fn)
            img = np.asarray(Image.open(curimgpath), dtype=np.uint8)
            imgdim = img.shape
            if imgdim[0] < newdim[0] or imgdim[1] < newdim[1]:
                print('small image, ignored')
                continue
            mesh = self.create_augumentation_mesh(imgdim, newdim, generate_type)
            ts = min(len(mesh), sample_num)
            l = (np.random.permutation(range(len(mesh))))[:ts]
            for p in l:
                r, c = mesh[p]
                timg = img[r:r+newdim[0],c:c+newdim[0],:]
                res['data'][...,nc-pre_nc] = timg
                res['joint_sample_offset'][...,nc-pre_nc] = [c,r]
                res['filenames'][nc-pre_nc] = curimgpath
                res['oribbox'][...,nc-pre_nc] = [c,r,c+newdim[1]-1,r+newdim[0]-1]
                nc = nc + 1
            if sample_num + nc-pre_nc > per_size or it == len(allfile)-1:
                tmpres = self.truncated_copydic(res, nc-pre_nc)
                tmpres['data'] = tmpres['data'].reshape((-1,nc-pre_nc),order='F')
                self.meta['data_sum'] += tmpres['data'].sum(axis=1,dtype=float)
                self.meta['ndata'] += nc - pre_nc
                savepath = iu.fullfile(self.savedata_info['savedir'], \
                                       self.savedata_info['savename'] + \
                                       '_' +  str(self.batch_id))
                myio.pickle(savepath, tmpres)
                self.batch_id = self.batch_id + 1
                pre_nc = nc
        if self.meta['ndata'] > 0:
            self.meta['data_mean'] = self.meta['data_sum'] / self.meta['ndata']
            self.meta['data_mean'] = self.meta['data_mean'].reshape((-1,1),order='F')
        else:
            self.meta['data_mean'] = 0
        del self.meta['data_sum']

        myio.pickle(iu.fullfile(self.savedata_info['savedir'], 'batches.meta'), self.meta)