Example #1
    def generate_sliding_detection_data_from_image(self, imgpath, scale_pairlist):
        from PIL import Image 
        import iutils as iu
        import imgproc
        self.init_meta('sliding')
        img = Image.open(imgpath)
        ndata = 0
        newdim = self.savedata_info['newdim']
        steps = self.imgdata_info['steps']
        # x1 is the size, x1 - 1 is the last pixel index 
        fx = lambda x1,x2: np.floor((x1  - newdim[1])/x2) + 1
        fy = lambda x1,x2: np.floor((x1  - newdim[0])/x2) + 1

        valid_idx = 0
        for s in scale_pairlist:
            ns = np.floor(np.asarray([img.size[0], img.size[1]]) * np.asarray([s[0], s[1]]))
            if ns[0] < newdim[1] or ns[1] < newdim[0]:
                break
            cur_n = fx(ns[0],steps[0]) * fy(ns[1], steps[1])
            valid_idx = valid_idx + 1
            ndata = ndata + cur_n 
        ndata = int(ndata)
        scale_pairlist = scale_pairlist[:valid_idx]
        print('Need to generate %d data' % ndata)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride =  self.savedata_info['indmap_para']['stride']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        joint_filter_size = self.savedata_info['indmap_para']['joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, ndata, self.meta['nparts'], self.meta['njoints'])
        dicjtname = 'joints8' if self.meta['njoints'] == 8 else 'joints'
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        res['slide_location'] = np.zeros((2, ndata), dtype=np.float64)
        res['scale'] = np.zeros((2, ndata), dtype=np.float64)
        res['filenames'] = [imgpath for x in range(ndata)]
        idx = 0
        dimX = iu.prod(newdim)

        for s in scale_pairlist:
            ns = np.floor(np.asarray([img.size[0], img.size[1]]) * np.asarray([s[0], s[1]]))
            ns = (int(ns[0]),int(ns[1]))
            nimg = img.resize((ns[0],ns[1]))
            arrimg = imgproc.ensure_rgb(np.asarray(nimg))
            for x in range(0, ns[0] - newdim[1] + 1, steps[0]):
                for y in range(0, ns[1] - newdim[0] + 1, steps[1]):
                    res['scale'][...,idx] = np.asarray([s[0],s[1]]).reshape((2))
                    res['slide_location'][...,idx] = np.asarray([x,y]).reshape((2))
                    res['data'][...,idx] = arrimg[y:y+newdim[0],x:x+newdim[1],:]
                    idx = idx + 1
        if idx != ndata:
            raise HMLPEError('Number of data is not consistent')
        res['data'] = res['data'].reshape((-1,ndata),order='F')
        return res
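
The counting pass in Example #1 determines how many windows fit at each scale: a W x H image scaled by (sx, sy) yields fx = floor((floor(W*sx) - newdim[1]) / steps[0]) + 1 windows along x and fy = floor((floor(H*sy) - newdim[0]) / steps[1]) + 1 along y, and the first scale too small to hold a single window truncates the scale list. Below is a minimal, self-contained sketch of just that counting step; the image size, window size, steps and scales are illustrative values, not taken from the example.

import numpy as np

def count_sliding_windows(img_size, newdim, steps, scale_pairlist):
    """Count sliding windows per scale, mirroring fx/fy in the example above.

    img_size: (width, height) of the source image
    newdim:   (window_height, window_width, channels)
    steps:    (x_step, y_step) in pixels
    """
    ndata = 0
    for sx, sy in scale_pairlist:
        w, h = np.floor(np.asarray(img_size, dtype=float) * np.asarray([sx, sy]))
        if w < newdim[1] or h < newdim[0]:
            break  # remaining (smaller) scales cannot hold a single window
        nx = np.floor((w - newdim[1]) / steps[0]) + 1   # windows along x
        ny = np.floor((h - newdim[0]) / steps[1]) + 1   # windows along y
        ndata += int(nx * ny)
    return ndata

# e.g. a 640x480 image, 112x112 windows, 12-pixel steps, three scales
print(count_sliding_windows((640, 480), (112, 112, 3), (12, 12),
                            [(1.0, 1.0), (0.8, 0.8), (0.5, 0.5)]))
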
Example #2
def ReadDataToCifarDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print("I haven't implemented the joints8 part yet")
        #raise ModecError("I haven't implemented the joints8 part yet")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4, s_first), dtype=int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2, 29, s_first), dtype=np.float32)
    tdsize = cifar.img_size[0]  # make sure img_size[0] == img_size[1]

    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata - i)
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][..., j] = tbox
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        d['coords'][..., j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C')
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               cifar.img_size).reshape(
                                                   (16), order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][..., j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), d)
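
The per-example work in the loop above boils down to: extend the torso box, crop it from the RGB image, resize the crop to cifar.img_size, and flatten it in column-major ('F') order into one column of d['data']. The sketch below isolates that crop-and-flatten step; since ExtendBndbox is repo-specific, the bounding box is passed directly as a plain (x0, y0, x1, y1) tuple, and the file path and output size are placeholders.

import numpy as np
from PIL import Image

def crop_to_column(imgpath, bbox, out_size):
    """Crop bbox = (x0, y0, x1, y1) from an image, resize it to
    out_size = (width, height) and flatten it in Fortran order,
    matching the layout of one column of d['data']."""
    img = np.asarray(Image.open(imgpath).convert('RGB'))   # ensure 3 channels
    x0, y0, x1, y1 = bbox
    sub = Image.fromarray(img[y0:y1, x0:x1, :])             # rows are y, columns are x
    patch = np.asarray(sub.resize(out_size))                # (height, width, 3) uint8
    return patch.reshape(-1, order='F')                     # one flat column

# hypothetical usage: a 120x160 crop resized to 112x112 and flattened
col = crop_to_column('person.jpg', (10, 20, 130, 180), (112, 112))
print(col.shape)   # (112 * 112 * 3,) = (37632,)
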
Example #3
def ReadDataToHMLPEDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
    Read all data in 'data_category'
    into HMLPE dictionary
    There is no need to generating training data, since they can be generated in
    hmlpe.py 
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print('Warning: the correctness of data category %s is not guaranteed' % data_category)
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {'data':(112,112,3), 'part_indmap':(8,8), 'joint_indmap':(8,8)} 
    nparts  = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4, buf_size), dtype=int)
    d['coords'] = np.ndarray((2,29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1),order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
            mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][...,j] = examples[i]['coords']
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox, img.size) 
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C') - 1 # to Python-style 0-indexing
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox, dimdic['data']).reshape((8,2),order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape((dsize),order='F') 
        d['data'][...,j] = data_img
        d['indmap'][...,j] = hmlpe.HMLPE.create_part_indicatormap(d['joints8'][...,j], hmlpe.part_idx, dimdic['part_indmap'], 0.3, 30.0,  12.0)
        d['joint_indmap'][...,j] = hmlpe.HMLPE.create_joint_indicatormap(d['joints8'][...,j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][...,j] = hmlpe.HMLPE.makejointmask(dimdic['data'], d['joints8'][...,j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
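
TransformPoints maps the eight joints from original-image coordinates into the frame of the cropped, resized patch. Under the natural assumption that a joint at (x, y) inside bbox = (x0, y0, x1, y1) lands at ((x - x0) * W / (x1 - x0), (y - y0) * H / (y1 - y0)) in a W x H patch, a minimal sketch looks like the following; the repo's TransformPoints may differ in details such as rounding or clamping, and the coordinates in the usage lines are made up.

import numpy as np

def transform_points(points, bbox, out_dim):
    """Hypothetical equivalent of TransformPoints: map (x, y) points from
    original-image space into a cropped patch resized to out_dim.

    points:  (N, 2) array of (x, y) coordinates
    bbox:    (x0, y0, x1, y1) crop box in the original image
    out_dim: (height, width, ...) as in dimdic['data']
    """
    x0, y0, x1, y1 = bbox
    scale = np.asarray([out_dim[1] / float(x1 - x0),    # x scale (width)
                        out_dim[0] / float(y1 - y0)])   # y scale (height)
    return (np.asarray(points, dtype=float) - [x0, y0]) * scale

# two joints in image space, mapped into a 112x112 patch cropped at (40, 60)-(152, 172)
joints = np.array([[50.0, 80.0], [60.0, 120.0]])
print(transform_points(joints, (40, 60, 152, 172), (112, 112, 3)))
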
Example #4
def ReadDataToCifarDic(imgdir,example_path, data_category, max_per_batch,save_dir):
    """
        read all data in 'data_category'
        into cifar style dictionary                
    """
    import scipy.io as sio
    import iutils as iu
    import cifar
    import iconvnet_datacvt as icvt
    from iutils import imgproc as imgproc
    from PIL import Image
    if data_category != 'istest':
        print("I haven't implemented the joints8 part yet")
        #raise ModecError("I haven't implemented the joints8 part yet")
    all_examples = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_examples, data_category)

    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    s_first = min(ndata, max_per_batch)
    d = cifar.PrepareData(s_first)
    d['oridet'] = np.ndarray((4, s_first), dtype=int)
    d['filepath'] = [str() for x in range(s_first)]
    d['coords'] = np.ndarray((2,29,s_first),dtype=np.float32)
    tdsize = cifar.img_size[0]  # make sure img_size[0] == img_size[1]
    
    j = 0
    bid = 1
    for i in range(ndata):
        if j == max_per_batch:
            icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)), \
                           d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = cifar.PrepareData(ndata-i)                
        fn = str(examples[i]['filepath'][0])
        fp = iu.fullfile(imgdir, fn)
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filepath'][j] = fp
        d['oridet'][...,j] = tbox
        d['oribbox'][...,j] = bbox = ExtendBndbox(tbox,img.size)
        d['coords'][...,j] = examples[i]['coords']
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape((8,2),order='C')
        d['joints8'][...,j] = TransformPoints(orijoints8, bbox,cifar.img_size).reshape((16),order='C')
        img = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(img[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
        data_img = np.asarray(sub_img.resize((cifar.img_size[0],\
                                               cifar.img_size[1]))).reshape((cifar.dim_data),order='F')
        d['data'][...,j] = data_img
        j = j + 1
    icvt.ut.pickle(iu.fullfile(save_dir, 'data_batch_' + str(bid)),d)
Example #5
    def generate_sliding_detection_data_from_image(self, imgpath,
                                                   scale_pairlist):
        from PIL import Image
        import iutils as iu
        import imgproc
        self.init_meta('sliding')
        img = Image.open(imgpath)
        ndata = 0
        newdim = self.savedata_info['newdim']
        steps = self.imgdata_info['steps']
        # x1 is the size, x1 - 1 is the last pixel index
        fx = lambda x1, x2: np.floor((x1 - newdim[1]) / x2) + 1
        fy = lambda x1, x2: np.floor((x1 - newdim[0]) / x2) + 1

        valid_idx = 0
        for s in scale_pairlist:
            ns = np.floor(
                np.asarray([img.size[0], img.size[1]]) *
                np.asarray([s[0], s[1]]))
            if ns[0] < newdim[1] or ns[1] < newdim[0]:
                break
            cur_n = fx(ns[0], steps[0]) * fy(ns[1], steps[1])
            valid_idx = valid_idx + 1
            ndata = ndata + cur_n
        ndata = int(ndata)
        scale_pairlist = scale_pairlist[:valid_idx]
        print('Need to generate %d data' % ndata)
        filter_size = self.savedata_info['indmap_para']['filter_size']
        stride = self.savedata_info['indmap_para']['stride']
        mdim = self.get_indmapdim(newdim, filter_size, stride)
        joint_filter_size = self.savedata_info['indmap_para'][
            'joint_filter_size']
        joint_stride = self.savedata_info['indmap_para']['joint_stride']
        jtmdim = self.get_indmapdim(newdim, joint_filter_size, joint_stride)
        res = self.prepare_savebuffer({'data':newdim, 'part_indmap':mdim, \
                                       'joint_indmap': jtmdim}, ndata, self.meta['nparts'], self.meta['njoints'])
        dicjtname = 'joints8' if self.meta['njoints'] == 8 else 'joints'
        res[dicjtname][:] = 0
        res['jointmasks'][:] = False
        res['indmap'][:] = False
        res['joint_indmap'][:] = False
        res['is_mirror'][:] = False
        res['is_positive'][:] = False
        res['slide_location'] = np.zeros((2, ndata), dtype=np.float64)
        res['scale'] = np.zeros((2, ndata), dtype=np.float64)
        res['filenames'] = [imgpath for x in range(ndata)]
        idx = 0
        dimX = iu.prod(newdim)

        for s in scale_pairlist:
            ns = np.floor(
                np.asarray([img.size[0], img.size[1]]) *
                np.asarray([s[0], s[1]]))
            ns = (int(ns[0]), int(ns[1]))
            nimg = img.resize((ns[0], ns[1]))
            arrimg = imgproc.ensure_rgb(np.asarray(nimg))
            for x in range(0, ns[0] - newdim[1] + 1, steps[0]):
                for y in range(0, ns[1] - newdim[0] + 1, steps[1]):
                    res['scale'][..., idx] = np.asarray([s[0], s[1]]).reshape((2))
                    res['slide_location'][..., idx] = np.asarray([x, y]).reshape((2))
                    res['data'][..., idx] = arrimg[y:y + newdim[0],
                                                   x:x + newdim[1], :]
                    idx = idx + 1
        if idx != ndata:
            raise HMLPEError('Number of data is not consistent')
        res['data'] = res['data'].reshape((-1, ndata), order='F')
        return res
Example #6
def ReadDataToHMLPEDic(imgdir, example_path, data_category, max_per_batch,
                       save_dir):
    """
    Read all data in 'data_category'
    into HMLPE dictionary
    There is no need to generating training data, since they can be generated in
    hmlpe.py 
    """
    import scipy.io as sio
    import iutils as iu
    import iread.myio as mio
    import iread.hmlpe as hmlpe
    import imgproc
    from PIL import Image
    if data_category != 'istest':
        print('Warning: the correctness of data category %s is not guaranteed' % data_category)
    all_example = sio.loadmat(example_path)['examples']
    examples = ExtractSubExample(all_example, data_category)
    ndata = examples.shape[-1]
    iu.ensure_dir(save_dir)
    buf_size = min(ndata, max_per_batch)
    dimdic = {
        'data': (112, 112, 3),
        'part_indmap': (8, 8),
        'joint_indmap': (8, 8)
    }
    nparts = 7
    njoints = 8
    d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts, njoints)
    d['oridet'] = np.zeros((4, buf_size), dtype=int)
    d['coords'] = np.ndarray((2, 29, buf_size), dtype=np.float32)
    tdsize = dimdic['data'][0]
    dsize = dimdic['data'][0] * dimdic['data'][1] * dimdic['data'][2]
    d['data'] = d['data'].reshape((dsize, -1), order='F')
    d['is_positive'][:] = True
    d['is_mirror'][:] = False
    bid = 1
    j = 0
    for i in range(ndata):
        if j == max_per_batch:
            mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
            bid = bid + 1
            j = 0
            if ndata - i < max_per_batch:
                d = hmlpe.HMLPE.prepare_savebuffer(dimdic, buf_size, nparts,
                                                   njoints)
        fp = iu.fullfile(imgdir, str(examples[i]['filepath'][0]))
        img = Image.open(fp)
        tbox = examples[i]['torsobox'][0].reshape((4))
        d['filenames'][j] = fp
        d['coords'][..., j] = examples[i]['coords']
        d['oribbox'][..., j] = bbox = ExtendBndbox(tbox, img.size)
        orijoints8 = CvtCoordsToJoints(examples[i]['coords']).reshape(
            (8, 2), order='C') - 1  # to Python-style 0-indexing
        d['joints8'][..., j] = TransformPoints(orijoints8, bbox,
                                               dimdic['data']).reshape(
                                                   (8, 2), order='C')
        imgarr = imgproc.ensure_rgb(np.asarray(img))
        sub_img = Image.fromarray(imgarr[bbox[1]:bbox[3], bbox[0]:bbox[2], :])
        data_img = np.asarray(
            sub_img.resize((dimdic['data'][0], dimdic['data'][1]))).reshape(
                (dsize), order='F')
        d['data'][..., j] = data_img
        d['indmap'][..., j] = hmlpe.HMLPE.create_part_indicatormap(
            d['joints8'][..., j], hmlpe.part_idx, dimdic['part_indmap'], 0.3,
            30.0, 12.0)
        d['joint_indmap'][..., j] = hmlpe.HMLPE.create_joint_indicatormap(
            d['joints8'][..., j], dimdic['joint_indmap'], 30.0, 12.0)
        d['jointmasks'][..., j] = hmlpe.HMLPE.makejointmask(dimdic['data'],
                                                            d['joints8'][..., j])
        j = j + 1
    mio.pickle(iu.fullfile(save_dir, 'data_batch_%d' % bid), d)
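
The indicator maps written in Examples #3 and #6 (create_part_indicatormap and create_joint_indicatormap, both repo-specific) summarise where parts and joints fall on a coarse grid over the 112x112 patch: with a 30-pixel window placed every 12 pixels, the patch is covered by an 8x8 grid of overlapping cells, and a cell is switched on when the joint lies inside its window. The sketch below is a hypothetical reconstruction of the joint map under that reading, not the repository's implementation.

import numpy as np

def joint_indicator_map(joint_xy, map_dim, filter_size, stride):
    """Hypothetical reconstruction of a joint indicator map: cell (r, c)
    covers pixels [c*stride, c*stride + filter_size) x
    [r*stride, r*stride + filter_size) and is True when the joint
    falls inside that window."""
    m = np.zeros(map_dim, dtype=bool)
    x, y = joint_xy
    for r in range(map_dim[0]):
        for c in range(map_dim[1]):
            if (c * stride <= x < c * stride + filter_size and
                    r * stride <= y < r * stride + filter_size):
                m[r, c] = True
    return m

# a joint at (40, 55) in the 112x112 patch, 30-pixel windows every 12 pixels
print(joint_indicator_map((40.0, 55.0), (8, 8), 30.0, 12.0).astype(int))
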