Code example #1
    def createSequence(self):
        """create ICVL Sequence, train and test sequence"""
        print("create ICVL dataset")

        di = ICVLImporter('../dataset/' + self.datasetName,
                          cacheDir=self.cacheDir)
        Seq1 = di.loadSequence('train')  # use the full dataset
        Seq2_1 = di.loadSequence('test_seq_1')
        Seq2_2 = di.loadSequence('test_seq_2')

        self.convert(Seq1)
        print("{} Train Seq1 ok!".format(self.datasetName))
        self.convert(Seq2_1)
        print("{} Test Seq1 ok!".format(self.datasetName))
        self.convert(Seq2_2)
        print("{} Test Seq2 ok!".format(self.datasetName))
Code example #2
    def getPCL(self, frame):
        """
        Get pointcloud from frame

        :param frame: image frame
        """

        return ICVLImporter.depthToPCL(frame.dpt, frame.T)
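For reference, a minimal usage sketch (the importer module path and dataset location are assumptions; the frame attributes follow the examples in this listing):

from data.importers import ICVLImporter  # module path assumed

di = ICVLImporter('../data/ICVL/')
Seq = di.loadSequence('test_seq_1')
frame = Seq.data[0]
# depthToPCL turns the cropped depth map plus its 2D crop transform into a point cloud
pcl = ICVLImporter.depthToPCL(frame.dpt, frame.T)
print(pcl.shape)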
Code example #3
File: dataset_hand2.py  Project: xyhak47/LSPS
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']
        print("create data")

        self.rng = np.random.RandomState(seed)
        self.di = ICVLImporter(root, refineNet=None, cacheDir='../../cache/')

        self.Seq1 = self.di.loadSequence(subset, docom=docom)
        self.Seq2 = self.di.loadSequence(subset.replace('1', '2'), docom=docom)

        self.num = len(self.Seq1.data) + len(self.Seq2.data)
        print(' data loaded with %d samples' % self.num)

        self.len_seq1 = len(self.Seq1.data)
Code example #4
    def getPCL(self, dpt, T):
        """
        Get pointcloud from a depth image
        :param dpt: depth image
        :param T: 2D transformation of crop
        """

        return ICVLImporter.depthToPCL(dpt, T)
    def __init__(self, imgSeqs=None, basepath=None, localCache=True):
        """
        constructor
        """
        super(ICVLDataset, self).__init__(imgSeqs, localCache)
        if basepath is None:
            basepath = '../../data/ICVL/'

        self.lmi = ICVLImporter(basepath)
Code example #6
File: dataset_hand2.py  Project: xyhak47/LSPS
class dataset_hand_ICVL_test(dataset_hand_ICVL):
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']
        print("create data")

        self.rng = np.random.RandomState(seed)
        self.di = ICVLImporter(root, refineNet=None, cacheDir='../../cache/')

        self.Seq1 = self.di.loadSequence(subset, docom=docom)
        self.Seq2 = self.di.loadSequence(subset.replace('1', '2'), docom=docom)

        self.num = len(self.Seq1.data) + len(self.Seq2.data)
        print(' data loaded with %d samples' % self.num)

        self.len_seq1 = len(self.Seq1.data)

    def __getitem__(self, i):

        if i < self.len_seq1:
            cube = np.asarray(self.Seq1.config['cube'], 'float32')
            com = np.asarray(self.Seq1.data[i].com, 'float32')
            M = np.asarray(self.Seq1.data[i].T, dtype='float32')
            gt3D = np.asarray(self.Seq1.data[i].gt3Dcrop, dtype='float32')
            img = np.asarray(self.Seq1.data[i].dpt.copy(), 'float32')
        else:
            cube = np.asarray(self.Seq2.config['cube'], 'float32')
            com = np.asarray(self.Seq2.data[i - self.len_seq1].com, 'float32')
            M = np.asarray(self.Seq2.data[i - self.len_seq1].T,
                           dtype='float32')
            gt3D = np.asarray(self.Seq2.data[i - self.len_seq1].gt3Dcrop,
                              dtype='float32')
            img = np.asarray(self.Seq2.data[i - self.len_seq1].dpt.copy(),
                             'float32')

        img = normalize(img, com, cube)

        return np.expand_dims(
            img, axis=0), gt3D.flatten() / (cube[2] / 2.), com, M, cube

    def __len__(self):
        return self.num
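A minimal sketch of driving this test dataset through PyTorch's DataLoader; the spec values are hypothetical and only use the keys read in __init__ above:

import torch
from dataset_hand2 import dataset_hand_ICVL_test  # file name taken from the header above

specs = {
    'seed': 23455,               # hypothetical values
    'root': '../../data/ICVL/',
    'subset': 'test_seq_1',      # Seq2 is derived via subset.replace('1', '2')
    'docom': False,
}
testset = dataset_hand_ICVL_test(specs)
loader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)
for img, gt3D, com, M, cube in loader:
    # img: 1xHxW normalized depth crops, gt3D: flattened joints divided by cube[2]/2
    pass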
Code example #7
File: dataset_hand2.py  Project: xyhak47/LSPS
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']

        self.rng = np.random.RandomState(seed)
        self.sampled_poses = None
        self.pose_only = False
        self.nmax = np.inf
        self.augment = specs['augment']
        self.num_sample_poses = specs['sample_poses']
        self.joint_subset = specs['joint_subset']
        self.aug_modes = ['none', 'com', 'rot']
        print("create data")

        self.di = ICVLImporter(root, cacheDir='../../cache/')

        self.Seq = self.di.loadSequence(subset, ['0'],
                                        rng=self.rng,
                                        shuffle=True,
                                        docom=docom)

        #print(self.Seq.data[0].gt3Dcrop)
        #self.di.showAnnotatedDepth(self.Seq.data[0])

        # create training data
        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[0].com, 'float32')
        img = np.asarray(self.Seq.data[0].dpt.copy(), 'float32')
        img = normalize(img, com, cube)

        self.hd = HandDetector(img,
                               abs(self.di.fx),
                               abs(self.di.fy),
                               importer=self.di)
        self.num = len(self.Seq.data)
        print(' data loaded with %d samples' % self.num)
Code example #8
from data.dataset import ICVLDataset
from util.handpose_evaluation import ICVLHandposeEvaluation
from util.helpers import shuffle_many_inplace

if __name__ == '__main__':

    eval_prefix = 'ICVL_COM_AUGMENT'
    if not os.path.exists('./eval/'+eval_prefix+'/'):
        os.makedirs('./eval/'+eval_prefix+'/')

    rng = numpy.random.RandomState(23455)

    print("create data")
    aug_modes = ['com', 'rot', 'none']  # 'sc',

    di = ICVLImporter('../data/ICVL/')
    Seq1_1 = di.loadSequence('train', ['0'], shuffle=True, rng=rng, docom=False)
    Seq1_1 = Seq1_1._replace(name='train_gt')
    Seq1_2 = di.loadSequence('train', ['0'], shuffle=True, rng=rng, docom=True)
    Seq1_2 = Seq1_2._replace(name='train_com')
    trainSeqs = [Seq1_1, Seq1_2]

    Seq2 = di.loadSequence('test_seq_1', docom=True)
    testSeqs = [Seq2]

    # create training data
    trainDataSet = ICVLDataset(trainSeqs)
    nSamp = numpy.sum([len(s.data) for s in trainSeqs])
    d1, g1 = trainDataSet.imgStackDepthOnly(trainSeqs[0].name)
    train_data = numpy.ones((nSamp, d1.shape[1], d1.shape[2], d1.shape[3]), dtype='float32')
    train_gt3D = numpy.ones((nSamp, g1.shape[1], g1.shape[2]), dtype='float32')
Code example #9
from data.transformations import transformPoint2D
from net.hiddenlayer import HiddenLayer, HiddenLayerParams

if __name__ == '__main__':

    eval_prefix = 'ICVL_EMB_t0nF8mp421fD553h1024_PCA30'
    if not os.path.exists('./eval/' + eval_prefix + '/'):
        os.makedirs('./eval/' + eval_prefix + '/')

    floatX = theano.config.floatX  # @UndefinedVariable

    rng = numpy.random.RandomState(23455)

    print("create data")

    di = ICVLImporter('../data/ICVL/')
    Seq1 = di.loadSequence('train', ['0'], shuffle=True, rng=rng)
    trainSeqs = [Seq1]

    Seq2 = di.loadSequence('test_seq_1')
    testSeqs = [Seq2]

    # create training data
    trainDataSet = ICVLDataset(trainSeqs)
    train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')

    mb = (train_data.nbytes) / (1024 * 1024)
    print("data size: {}Mb".format(mb))

    valDataSet = ICVLDataset(testSeqs)
    val_data, val_gt3D = valDataSet.imgStackDepthOnly('test_seq_1')
Code example #10
def main(_):
    start = time.perf_counter()  # time.clock() was removed in Python 3.8

    if args.dataset == 'ICVL':
        if args.phase == 'train':
            di_train = ICVLImporter(args.data_root,
                                    cacheDir='./cache/ICVL/',
                                    refineNet=None)
            Seq_train = di_train.loadSequence('Training_2014',
                                              rng=rng,
                                              shuffle=False,
                                              docom=False,
                                              cube=None)
            train_num = len(Seq_train.data)
            print('loaded over with %d train samples' % train_num)
            imgs = np.asarray([d.dpt.copy() for d in Seq_train.data],
                              'float32')
            gt3Dcrops = np.asarray([d.gt3Dcrop for d in Seq_train.data],
                                   dtype='float32')
            M = np.asarray([d.T for d in Seq_train.data], dtype='float32')
            com2D = np.asarray([d.com2D for d in Seq_train.data], 'float32')
            cube = np.asarray([d.cube for d in Seq_train.data], 'float32')
            # uv_crop = np.asarray([d.gtcrop for d in Seq_train.data], dtype='float32')[:, :, 0:-1]
            del Seq_train

            train_stream = MultiDataStream([imgs, gt3Dcrops, M, com2D, cube])
    else:
        raise ValueError('error dataset %s' % args.dataset)

    di_test = ICVLImporter(args.data_root,
                           cacheDir='./cache/ICVL/',
                           refineNet=None)
    Seq_test = di_test.loadSequence('Testing_2014',
                                    rng=rng,
                                    shuffle=False,
                                    docom=False,
                                    cube=(250, 250, 250))
    test_num = len(Seq_test.data)
    print('loaded over with %d test samples' % test_num)
    test_gt3Dcrops = np.asarray([d.gt3Dcrop for d in Seq_test.data],
                                dtype='float32')
    test_M = np.asarray([d.T for d in Seq_test.data], dtype='float32')
    # test_com2D = np.asarray([d.com2D for d in Seq_test.data], 'float32')
    # test_uv_crop = np.asarray([d.gtcrop for d in Seq_test.data], dtype='float32')[:, :, 0:-1]
    test_uv = np.asarray([d.gtorig for d in Seq_test.data],
                         'float32')[:, :, 0:-1]
    test_com3D = np.asarray([d.com3D for d in Seq_test.data], 'float32')
    test_cube = np.asarray([d.cube for d in Seq_test.data], 'float32')
    test_imgs = np.asarray([d.dpt.copy() for d in Seq_test.data], 'float32')
    test_data = np.ones_like(test_imgs)
    for it in range(test_num):
        test_data[it] = norm_dm(test_imgs[it], test_com3D[it], test_cube[it])
    del Seq_test
    test_stream = MultiDataStream(
        [test_data, test_gt3Dcrops, test_M, test_com3D, test_uv, test_cube])
    clip_index = int(np.floor(test_num / args.batch_size)) * args.batch_size
    extra_test_data = [
        test_data[clip_index:], test_gt3Dcrops[clip_index:],
        test_M[clip_index:], test_com3D[clip_index:], test_uv[clip_index:],
        test_cube[clip_index:]
    ]

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    tf.set_random_seed(1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model = Model(sess, args)
        model.train(args, train_stream, test_stream) if args.phase == 'train' \
            else model.test(args, test_stream, extra_test_data=None)
        end = time.perf_counter()
        print('running time: %f s' % (end - start))
Code example #11
from net.hiddenlayer import HiddenLayer, HiddenLayerParams

if __name__ == '__main__':

    eval_prefix = 'ICVL_EMB_t0nF8mp421fD553h1024_PCA30_AUGMENT'
    if not os.path.exists('./eval/' + eval_prefix + '/'):
        os.makedirs('./eval/' + eval_prefix + '/')

    rng = numpy.random.RandomState(23455)

    print("create data")
    aug_modes = ['com', 'rot', 'none']  # 'sc',

    comref = None  # "./eval/ICVL_COM_AUGMENT/net_ICVL_COM_AUGMENT.pkl"
    docom = False
    di = ICVLImporter('../data/ICVL/', refineNet=comref)
    Seq1 = di.loadSequence('train', ['0'], shuffle=True, rng=rng, docom=docom)
    trainSeqs = [Seq1]

    Seq2 = di.loadSequence('test_seq_1')
    testSeqs = [Seq2]

    # create training data
    trainDataSet = ICVLDataset(trainSeqs)
    train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
    train_data_cube = numpy.asarray([Seq1.config['cube']] *
                                    train_data.shape[0],
                                    dtype='float32')
    train_data_com = numpy.asarray([d.com for d in Seq1.data], dtype='float32')
    train_gt3Dcrop = numpy.asarray([d.gt3Dcrop for d in Seq1.data],
                                   dtype='float32')
Code example #12
File: dataset_hand2.py  Project: xyhak47/LSPS
class dataset_hand_ICVL(data.Dataset):
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']

        self.rng = np.random.RandomState(seed)
        self.sampled_poses = None
        self.pose_only = False
        self.nmax = np.inf
        self.augment = specs['augment']
        self.num_sample_poses = specs['sample_poses']
        self.joint_subset = specs['joint_subset']
        self.aug_modes = ['none', 'com', 'rot']
        print("create data")

        self.di = ICVLImporter(root, cacheDir='../../cache/')

        self.Seq = self.di.loadSequence(subset, ['0'],
                                        rng=self.rng,
                                        shuffle=True,
                                        docom=docom)

        #print(self.Seq.data[0].gt3Dcrop)
        #self.di.showAnnotatedDepth(self.Seq.data[0])

        # create training data
        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[0].com, 'float32')
        img = np.asarray(self.Seq.data[0].dpt.copy(), 'float32')
        img = normalize(img, com, cube)

        self.hd = HandDetector(img,
                               abs(self.di.fx),
                               abs(self.di.fy),
                               importer=self.di)
        self.num = len(self.Seq.data)
        print(' data loaded with %d samples' % self.num)

    def sample_poses(self):
        train_cube = np.asarray([self.Seq.config['cube']] * self.num,
                                dtype='float32')
        train_com = np.asarray([d.com for d in self.Seq.data], dtype='float32')
        train_gt3D = np.asarray([d.gt3Dcrop for d in self.Seq.data],
                                dtype='float32')

        self.sampled_poses = self.hd.sampleRandomPoses(
            self.di, self.rng, train_gt3D, train_com, train_cube,
            self.num_sample_poses, self.nmax, self.aug_modes).reshape(
                (-1, train_gt3D.shape[1] * 3))
        self.num = self.sampled_poses.shape[0]
        self.nmax = self.sampled_poses.shape[0]
        print('%d sample poses created!' % self.num)

    def __getitem__(self, i):

        if self.pose_only and self.sampled_poses is not None:
            return self.sampled_poses[i]

        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[i].com, 'float32')
        M = np.asarray(self.Seq.data[i].T, dtype='float32')
        gt3D = np.asarray(self.Seq.data[i].gt3Dcrop, dtype='float32')
        img = np.asarray(self.Seq.data[i].dpt.copy(), 'float32')
        img = normalize(img, com, cube)

        if not self.augment:
            if self.pose_only:
                return gt3D.flatten() / (cube[2] / 2.)
            #print(img.shape, gt3D.flatten().shape, com.shape, M.shape, cube.shape)
            return np.expand_dims(
                img, axis=0), gt3D.flatten() / (cube[2] / 2.), com, M, cube

        img, _, gt3D, cube, com2D, M, _ = augmentCrop(
            img, gt3D, self.di.joint3DToImg(com), cube, M,
            self.aug_modes, self.hd, rng=self.rng)

        if self.pose_only:
            return gt3D.flatten()

        #print(imgD.shape, gt3Dn.flatten().shape, com.shape, M.shape, cube.shape)
        return np.expand_dims(
            img, axis=0), gt3D.flatten(), self.di.jointImgTo3D(com2D), M, cube

    def set_nmax(self, frac):
        self.nmax = int(self.num * frac)
        print('self.nmax %d' % self.nmax)

    def __len__(self):
        return np.minimum(self.num, self.nmax)
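The pose-only sampling path of this class can be exercised via sample_poses() and set_nmax(); a minimal sketch with hypothetical spec values (the keys mirror those read in __init__ above):

from dataset_hand2 import dataset_hand_ICVL  # file name taken from the header above

specs = {
    'seed': 23455,               # hypothetical values
    'root': '../../data/ICVL/',
    'subset': 'train',
    'docom': False,
    'augment': False,
    'sample_poses': 10000,
    'joint_subset': None,
}
ds = dataset_hand_ICVL(specs)
ds.pose_only = True   # __getitem__ then yields flattened poses instead of (img, pose, com, M, cube)
ds.sample_poses()     # draws random poses via HandDetector.sampleRandomPoses and stores them
ds.set_nmax(0.5)      # cap the number of items reported by __len__ to half of the samples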
Code example #13
    def loadData(self):
        """
        load the dataset
        :return: data(dict)
        """
        data = {}  # dataset dict to be returned
        print('create {} {} dataset'.format(self.name, self.phase))
        if self.name == 'NYU':
            di = NYUImporter(root + '/dataset/' + self.name,
                             cacheDir=self.cachePath)
            if self.phase == 'train':
                if self.aug:  # do augmentation for training
                    sequence = di.loadSequence('train',
                                               rotation=True,
                                               docom=True,
                                               dsize=self.dsize)
                else:
                    sequence = di.loadSequence('train',
                                               docom=True,
                                               dsize=self.dsize)
                data = self.convert(sequence)
            elif self.phase == 'test':
                sequence1 = di.loadSequence(
                    'test_1', docom=True, dsize=self.dsize)  # test sequence 1
                sequence2 = di.loadSequence(
                    'test_2', docom=True, dsize=self.dsize)  # test sequence 2
                data_1 = self.convert(sequence1)
                data_2 = self.convert(sequence2)

                data['depth'] = np.concatenate(
                    [data_1['depth'], data_2['depth']])
                data['dpt3D'] = np.concatenate(
                    [data_1['dpt3D'], data_2['dpt3D']])
                data['com'] = np.concatenate([data_1['com'], data_2['com']])
                data['inds'] = np.concatenate([data_1['inds'], data_2['inds']])
                data['config'] = np.concatenate(
                    [data_1['config'], data_2['config']])
                data['joint'] = np.concatenate(
                    [data_1['joint'], data_2['joint']])

        elif self.name == 'ICVL':
            di = ICVLImporter(root + '/dataset/' + self.name,
                              cacheDir=self.cachePath)
            if self.phase == 'train':
                sequence = di.loadSequence(
                    'train', ['0'], docom=True, dsize=self.dsize
                )  # we cannot use augmented ICVL because the dataset is too large
                data = self.convert(sequence)
            elif self.phase == 'test':
                sequence1 = di.loadSequence(
                    'test_seq_1', docom=True,
                    dsize=self.dsize)  # test sequence 1
                sequence2 = di.loadSequence(
                    'test_seq_2', docom=True,
                    dsize=self.dsize)  # test sequence 2
                data_1 = self.convert(sequence1)
                size_1 = data_1['com'].shape[0]
                data_2 = self.convert(
                    sequence2,
                    size_before=size_1)  # concatenate the two test sequences

                data['depth'] = np.concatenate(
                    [data_1['depth'], data_2['depth']])
                data['dpt3D'] = np.concatenate(
                    [data_1['dpt3D'], data_2['dpt3D']])
                data['com'] = np.concatenate([data_1['com'], data_2['com']])
                data['inds'] = np.concatenate([data_1['inds'], data_2['inds']])
                data['config'] = np.concatenate(
                    [data_1['config'], data_2['config']])
                data['joint'] = np.concatenate(
                    [data_1['joint'], data_2['joint']])
        else:
            raise Exception('unknown dataset {} or phase {}.'.format(
                self.name, self.phase))

        return data
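A minimal sketch of consuming the returned dictionary (the loader instance is hypothetical; the keys are the ones assembled above):

data = loader.loadData()   # 'loader' stands for whatever object provides loadData()
depth = data['depth']      # depth crops
dpt3D = data['dpt3D']
com = data['com']          # centers of mass
joints = data['joint']     # joint annotations
print(depth.shape, joints.shape)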
Code example #14
File: evaluationICVL.py  Project: weleen/CADSTN.caffe
        hpe.append(ICVLHandposeEvaluation(gt3D, joints))
        hpe[ind].subfolder += eval_prefix[ind]+'/'
        mean_error = hpe[ind].getMeanError()
        max_error = hpe[ind].getMaxError()
        print("Test on {}_{}".format(model[ind], weight_num[ind]))
        print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))
        print("MD score: {}".format(hpe[ind].getMDscore(80)))

        print("{}".format([hpe[ind].getJointMeanError(j) for j in range(joints[0].shape[0])]))
        print("{}".format([hpe[ind].getJointMaxError(j) for j in range(joints[0].shape[0])]))

    print "Testing baseline"
    #################################
    # BASELINE
    # Load the evaluation
    di = ICVLImporter('../dataset/ICVL/', cacheDir='../dataset/cache/')
    data_baseline = di.loadBaseline('../dataset/ICVL/Results/LRF_Results_seq_1.txt')

    hpe_base = ICVLHandposeEvaluation(gt3D, data_baseline)
    hpe_base.subfolder += eval_prefix[0]+'/'
    print("Mean error: {}mm".format(hpe_base.getMeanError()))

    plot_list = zip(model, hpe)
    hpe_base.plotEvaluation(eval_prefix[0], methodName='Tang et al.', baseline=plot_list)

    Seq2_1 = di.loadSequence('test_seq_1')
    Seq2_2 = di.loadSequence('test_seq_2')
    testSeqs = [Seq2_1, Seq2_2]

    for index in range(len(hpe)):
        ind = 0