def getPCL(self, dpt, T):
    """
    Get point cloud from a depth image

    :param dpt: depth image
    :param T: 2D transformation of crop
    :return: point cloud
    """
    return NYUImporter.depthToPCL(dpt, T)
def getPCL(self, frame):
    """
    Get point cloud from a frame

    :param frame: image frame
    :return: point cloud
    """
    return NYUImporter.depthToPCL(frame.dpt, frame.T)
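# --- Added sketch (not in the original code) ---
# A hedged example of how these two getPCL overloads might be exercised.
# It assumes the DeepPrior-style codebase is importable; the dataset path
# is a placeholder. Both variants reduce to the same depthToPCL helper,
# which is called on the class itself in the snippets above.
from data.importers import NYUImporter

di = NYUImporter('../data/NYU/')  # placeholder path
seq = di.loadSequence('test_1')
frame = seq.data[0]
pcl = NYUImporter.depthToPCL(frame.dpt, frame.T)
print(pcl.shape)  # expected: an N x 3 point cloud (assumption)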
def createSequence(self):
    """create NYU Sequence, train and test sequence"""
    print("create NYU dataset")
    di = NYUImporter('../dataset/' + self.datasetName, cacheDir=self.cacheDir)
    Seq1 = di.loadSequence('train', flip=True, rotation=True)  # train sequence
    Seq2_1 = di.loadSequence('test_1')  # test sequence 1
    Seq2_2 = di.loadSequence('test_2')  # test sequence 2

    self.convert(Seq1)
    print("{} Train Seq1 ok!".format(self.datasetName))
    self.convert(Seq2_1)
    print("{} Test Seq1 ok!".format(self.datasetName))
    self.convert(Seq2_2)
    print("{} Test Seq2 ok!".format(self.datasetName))
def __init__(self, imgSeqs=None, basepath=None, localCache=True):
    """
    constructor
    """
    super(POSTDataset, self).__init__(imgSeqs, localCache)
    if basepath is None:
        basepath = '../../data/NYU/'
    self.lmi = NYUImporter(basepath)
def __init__(self, imgSeqs=None, basepath=None):
    """
    constructor
    """
    super(NYUDataset, self).__init__(imgSeqs)
    if basepath is None:
        basepath = '../../data/NYU/'
    self.lmi = NYUImporter(basepath)
class dataset_hand_NYU_test(dataset_hand_NYU):
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']
        print("create data")
        self.rng = np.random.RandomState(seed)
        self.di = NYUImporter(root, refineNet=None, allJoints=True,
                              cacheDir='../../cache/')
        self.Seq = self.di.loadSequence(subset, shuffle=False, rng=self.rng,
                                        docom=docom)
        self.num = len(self.Seq.data)
        print(' data loaded with %d samples' % self.num)

    def __getitem__(self, i):
        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[i].com, 'float32')
        M = np.asarray(self.Seq.data[i].T, dtype='float32')
        gt3D = np.asarray(self.Seq.data[i].gt3Dcrop, dtype='float32')
        img = np.asarray(self.Seq.data[i].dpt.copy(), 'float32')
        img = normalize(img, com, cube)
        if False:  # disabled debug branch
            dpt = self.di.loadDepthMap(self.Seq.data[i].fileName)
            #print(dpt.shape)
            return np.expand_dims(img, axis=0), gt3D.flatten() / (cube[2] / 2.), \
                com, M, cube, cube  #, self.Seq.data[i].fileName
        return np.expand_dims(img, axis=0), gt3D.flatten(), com, M, cube

    def __len__(self):
        return self.num
from net.hiddenlayer import HiddenLayer, HiddenLayerParams

if __name__ == '__main__':
    eval_prefix = 'NYU_EMB_t0nF8mp421fD553h1024_PCA30_AUGMENT'
    if not os.path.exists('./eval/' + eval_prefix + '/'):
        os.makedirs('./eval/' + eval_prefix + '/')

    rng = numpy.random.RandomState(23455)

    print("create data")
    aug_modes = ['com', 'rot', 'none']  # 'sc',
    comref = None  # "./eval/NYU_COM_AUGMENT/net_NYU_COM_AUGMENT.pkl"
    docom = False
    di = NYUImporter('../data/NYU/', refineNet=comref)

    Seq1 = di.loadSequence('train', shuffle=True, rng=rng, docom=docom)
    trainSeqs = [Seq1]

    Seq2_1 = di.loadSequence('test_1', docom=docom)
    Seq2_2 = di.loadSequence('test_2', docom=docom)
    testSeqs = [Seq2_1, Seq2_2]

    # create training data
    trainDataSet = NYUDataset(trainSeqs)
    train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
    train_data_cube = numpy.asarray([Seq1.config['cube']] * train_data.shape[0],
                                    dtype='float32')
    train_data_com = numpy.asarray([d.com for d in Seq1.data], dtype='float32')
    train_gt3Dcrop = numpy.asarray([d.gt3Dcrop for d in Seq1.data],
                                   dtype='float32')
import sys
sys.path.append('../../')  # add root directory
from data.importers import NYUImporter
from util.handdetector import HandDetector
from data.transformations import transformPoints2D
import numpy as np
from util.preprocess import augmentCrop, norm_dm, joints_heatmap_gen
import tensorflow as tf

rng = np.random.RandomState(23455)

from netlib.basemodel import basenet2

train_root = '/home/dumyy/data/nyu/dataset/'
di_train = NYUImporter(train_root, cacheDir='../../cache/NYU/',
                       refineNet=None, allJoints=False)
Seq_train = di_train.loadSequence('train', rng=rng, shuffle=True,
                                  docom=False, cube=None)
train_num = len(Seq_train.data)

cubes_train = np.asarray([d.cube for d in Seq_train.data], 'float32')
coms_train = np.asarray([d.com for d in Seq_train.data], 'float32')
Ms_train = np.asarray([d.T for d in Seq_train.data], dtype='float32')
gt3Dcrops_train = np.asarray([d.gt3Dcrop for d in Seq_train.data], dtype='float32')
imgs_train = np.asarray([d.dpt.copy() for d in Seq_train.data], 'float32')

di_test = NYUImporter(train_root, cacheDir='../../cache/NYU/',
                      refineNet=None, allJoints=False)
Seq_test = di_test.loadSequence('test', rng=rng, shuffle=False,
                                docom=False, cube=(250, 250, 250))
test_num = len(Seq_test.data)

cubes_test = np.asarray([d.cube for d in Seq_test.data], 'float32')
coms_test = np.asarray([d.com for d in Seq_test.data], 'float32')
gt3Dcrops_test = np.asarray([d.gt3Dcrop for d in Seq_test.data], dtype='float32')
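# --- Added sketch (not in the original script) ---
# The stacked arrays above are typically normalized per sample before being
# fed to the network. A minimal sketch of that step, assuming the norm_dm
# helper imported above behaves as in the training script shown later in
# this collection (normalizing each depth crop around its center of mass):
train_data = np.ones_like(imgs_train)
for it in range(train_num):
    train_data[it] = norm_dm(imgs_train[it], coms_train[it], cubes_train[it])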
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Markus Oberweger"
__email__ = "*****@*****.**"
__status__ = "Development"

if __name__ == '__main__':
    # di = ICVLImporter('../data/ICVL/')
    # Seq2 = di.loadSequence('test_seq_1')
    # testSeqs = [Seq2]
    #
    # testDataSet = ICVLDataset(testSeqs)
    # test_data, test_gt3D = testDataSet.imgStackDepthOnly('test_seq_1')

    di = NYUImporter('../data/NYU/')
    Seq2 = di.loadSequence('test_1')
    testSeqs = [Seq2]

    testDataSet = NYUDataset(testSeqs)
    test_data, test_gt3D = testDataSet.imgStackDepthOnly('test_1')

    # load trained network
    # poseNetParams = PoseRegNetParams(type=11, nChan=1, wIn=128, hIn=128, batchSize=1, numJoints=16, nDims=3)
    # poseNet = PoseRegNet(numpy.random.RandomState(23455), cfgParams=poseNetParams)
    # poseNet.load("./ICVL_network_prior.pkl")
    poseNetParams = PoseRegNetParams(type=11, nChan=1, wIn=128, hIn=128, batchSize=1,
from util.handpose_evaluation import NYUHandposeEvaluation
import cv2

if __name__ == '__main__':
    eval_prefix = 'NYU_COM'
    if not os.path.exists('./eval/' + eval_prefix + '/'):
        os.makedirs('./eval/' + eval_prefix + '/')

    floatX = theano.config.floatX  # @UndefinedVariable

    rng = numpy.random.RandomState(23455)

    print("create data")
    di = NYUImporter('../data/NYU/')
    Seq1 = di.loadSequence('train', shuffle=True, rng=rng, docom=True)
    trainSeqs = [Seq1]

    Seq2_1 = di.loadSequence('test_1', docom=True)
    Seq2_2 = di.loadSequence('test_2', docom=True)
    testSeqs = [Seq2_1, Seq2_2]

    # create training data
    trainDataSet = NYUDataset(trainSeqs)
    train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
    mb = (train_data.nbytes) / (1024 * 1024)
    print("data size: {}Mb".format(mb))

    testDataSet = NYUDataset(testSeqs)
from data.transformations import transformPoint2D
from net.hiddenlayer import HiddenLayer, HiddenLayerParams

if __name__ == '__main__':
    eval_prefix = 'NYU_EMB_t0nF8mp421fD553h1024_PCA30'
    if not os.path.exists('./eval/' + eval_prefix + '/'):
        os.makedirs('./eval/' + eval_prefix + '/')

    floatX = theano.config.floatX  # @UndefinedVariable

    rng = numpy.random.RandomState(23455)

    print("create data")
    di = NYUImporter('../data/NYU/')
    Seq1 = di.loadSequence('train', shuffle=True, rng=rng)
    trainSeqs = [Seq1]

    Seq2_1 = di.loadSequence('test_1')
    Seq2_2 = di.loadSequence('test_2')
    testSeqs = [Seq2_1, Seq2_2]

    # create training data
    trainDataSet = NYUDataset(trainSeqs)
    train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
    mb = (train_data.nbytes) / (1024 * 1024)
    print("data size: {}Mb".format(mb))

    valDataSet = NYUDataset(testSeqs)
def loadData(self):
    """
    load the dataset

    :return: data (dict)
    """
    data = {}  # dict to be returned
    print('create {} {} dataset'.format(self.name, self.phase))
    if self.name == 'NYU':
        di = NYUImporter(root + '/dataset/' + self.name, cacheDir=self.cachePath)
        if self.phase == 'train':
            if self.aug:  # do augmentation for training
                sequence = di.loadSequence('train', rotation=True, docom=True,
                                           dsize=self.dsize)
            else:
                sequence = di.loadSequence('train', docom=True, dsize=self.dsize)
            data = self.convert(sequence)
        elif self.phase == 'test':
            sequence1 = di.loadSequence('test_1', docom=True, dsize=self.dsize)  # test sequence 1
            sequence2 = di.loadSequence('test_2', docom=True, dsize=self.dsize)  # test sequence 2
            data_1 = self.convert(sequence1)
            data_2 = self.convert(sequence2)

            data['depth'] = np.concatenate([data_1['depth'], data_2['depth']])
            data['dpt3D'] = np.concatenate([data_1['dpt3D'], data_2['dpt3D']])
            data['com'] = np.concatenate([data_1['com'], data_2['com']])
            data['inds'] = np.concatenate([data_1['inds'], data_2['inds']])
            data['config'] = np.concatenate([data_1['config'], data_2['config']])
            data['joint'] = np.concatenate([data_1['joint'], data_2['joint']])
    elif self.name == 'ICVL':
        di = ICVLImporter(root + '/dataset/' + self.name, cacheDir=self.cachePath)
        if self.phase == 'train':
            # we can not use the augmented ICVL data because the dataset is too big
            sequence = di.loadSequence('train', ['0'], docom=True, dsize=self.dsize)
            data = self.convert(sequence)
        elif self.phase == 'test':
            sequence1 = di.loadSequence('test_seq_1', docom=True, dsize=self.dsize)  # test sequence 1
            sequence2 = di.loadSequence('test_seq_2', docom=True, dsize=self.dsize)  # test sequence 2
            data_1 = self.convert(sequence1)
            size_1 = data_1['com'].shape[0]
            # concatenate the two test sequences
            data_2 = self.convert(sequence2, size_before=size_1)

            data['depth'] = np.concatenate([data_1['depth'], data_2['depth']])
            data['dpt3D'] = np.concatenate([data_1['dpt3D'], data_2['dpt3D']])
            data['com'] = np.concatenate([data_1['com'], data_2['com']])
            data['inds'] = np.concatenate([data_1['inds'], data_2['inds']])
            data['config'] = np.concatenate([data_1['config'], data_2['config']])
            data['joint'] = np.concatenate([data_1['joint'], data_2['joint']])
    else:
        raise Exception('unknown dataset {} or phase {}.'.format(self.name, self.phase))

    return data
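# --- Added sketch (not in the original code) ---
# A hedged example of consuming the dict returned by loadData(); 'dataset' is
# a placeholder for an instance of the class this method belongs to, and only
# the keys populated above are assumed to exist.
data = dataset.loadData()
depth = data['depth']    # stacked depth crops (both test sequences, for 'test')
joints = data['joint']   # ground-truth joint annotations, same ordering
print('loaded {} frames, joint array shape {}'.format(depth.shape[0], joints.shape))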
def main(_):
    start = time.clock()
    if args.dataset == 'NYU':
        if args.phase == 'train':
            di_train = NYUImporter(args.data_root, cacheDir='./cache/NYU/',
                                   refineNet=None, allJoints=False)
            Seq_train = di_train.loadSequence('train', rng=rng, shuffle=False,
                                              docom=False, cube=None)
            train_num = len(Seq_train.data)
            print('loaded over with %d train samples' % train_num)

            imgs = np.asarray([d.dpt.copy() for d in Seq_train.data], 'float32')
            gt3Dcrops = np.asarray([d.gt3Dcrop for d in Seq_train.data], dtype='float32')
            M = np.asarray([d.T for d in Seq_train.data], dtype='float32')
            com2D = np.asarray([d.com2D for d in Seq_train.data], 'float32')
            cube = np.asarray([d.cube for d in Seq_train.data], 'float32')
            # uv_crop = np.asarray([d.gtcrop for d in Seq_train.data], dtype='float32')[:, :, 0:-1]
            del Seq_train
            train_stream = MultiDataStream([imgs, gt3Dcrops, M, com2D, cube])
    else:
        raise ValueError('error dataset %s' % args.dataset)

    di_test = NYUImporter(args.data_root, cacheDir='./cache/NYU/',
                          refineNet=None, allJoints=False)
    Seq_test = di_test.loadSequence('test', rng=rng, shuffle=False,
                                    docom=False, cube=(250, 250, 250))
    test_num = len(Seq_test.data)
    print('loaded over with %d test samples' % test_num)

    test_gt3Dcrops = np.asarray([d.gt3Dcrop for d in Seq_test.data], dtype='float32')
    test_M = np.asarray([d.T for d in Seq_test.data], dtype='float32')
    # test_com2D = np.asarray([d.com2D for d in Seq_test.data], 'float32')
    test_uv_crop = np.asarray([d.gtcrop for d in Seq_test.data], dtype='float32')[:, :, 0:-1]
    test_uv = np.asarray([d.gtorig for d in Seq_test.data], 'float32')[:, :, 0:-1]
    test_com3D = np.asarray([d.com3D for d in Seq_test.data], 'float32')
    test_cube = np.asarray([d.cube for d in Seq_test.data], 'float32')
    test_imgs = np.asarray([d.dpt.copy() for d in Seq_test.data], 'float32')
    test_data = np.ones_like(test_imgs)
    for it in range(test_num):
        test_data[it] = norm_dm(test_imgs[it], test_com3D[it], test_cube[it])
    del Seq_test
    test_stream = MultiDataStream(
        [test_data, test_gt3Dcrops, test_M, test_com3D, test_uv, test_cube])
    clip_index = np.int(np.floor(test_num / args.batch_size)) * args.batch_size
    extra_test_data = [
        test_data[clip_index:], test_gt3Dcrops[clip_index:], test_M[clip_index:],
        test_com3D[clip_index:], test_uv[clip_index:], test_cube[clip_index:]
    ]

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
    tf.set_random_seed(1)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        model = Model(sess, args)
        model.train(args, train_stream, test_stream) if args.phase == 'train' \
            else model.test(args, test_stream, extra_test_data=None)
    end = time.clock()
    print('running time: %f s' % (end - start))
C = float(sys.argv[4])
nrand = int(sys.argv[5])
seed = int(sys.argv[6])
fingers = sys.argv[7].split(",")
weight = float(sys.argv[8])

n_per_sample = 2
if alpha > 0.0:
    n_per_sample_val = 2
else:
    n_per_sample_val = 1
tol = 1e-12
J = 14

di = NYUImporter('../../../DeepPrior/')
Seq = di.loadSequence('train')
trainDataset = NYUDataset([Seq])
X_train, Y_train = trainDataset.imgStackDepthOnly('train')

use_gpu = True
gpu_id = 0
if use_gpu:
    xp = cuda.cupy
else:
    xp = np

Y_train = xp.reshape(Y_train, (Y_train.shape[0], Y_train.shape[1] * Y_train.shape[2]))
np.random.seed(0)
    print 'write the result in {}'.format(file_name)
    with open(file_name, 'w') as f:
        for i in xrange(predicted_joints.shape[0]):
            for item in predicted_joints[i].reshape(joint_size):
                f.write("%s " % item)
            f.write("\n")
    predicted_joints = loadPredFile(file_name)
    return predicted_joints, file_name


if __name__ == '__main__':
    ABLATION = False  # ablation or comparison

    # test NYU dataset
    di = NYUImporter('../dataset/NYU/', cacheDir='../dataset/cache/')
    gt3D = []
    Seq2_1 = di.loadSequence('test_1', docom=True)
    Seq2_2 = di.loadSequence('test_2', docom=True)
    testSeqs = [Seq2_1, Seq2_2]
    for seq in testSeqs:
        gt3D.extend([j.gt3Dorig for j in seq.data])
    gt3D = np.array(gt3D)
    if DEBUG:
        print 'gt3D.shape = ', gt3D.shape

    model = []
    weight_num = []
    pred_joints = []
class dataset_hand_NYU(data.Dataset):
    def __init__(self, specs):
        seed = specs['seed']
        root = specs['root']
        subset = specs['subset']
        docom = specs['docom']
        self.rng = np.random.RandomState(seed)
        self.sampled_poses = None
        self.pose_only = False
        self.nmax = np.inf
        self.augment = specs['augment']
        self.num_sample_poses = specs['sample_poses']
        self.joint_subset = specs['joint_subset']
        self.aug_modes = ['none', 'com', 'rot']
        print("create data")

        self.flip_y = False
        com_idx = 32
        cube_size = 300
        if 'MSRA' in self.joint_subset:
            self.joint_subset = np.asarray([29, 23, 22, 20, 18, 17, 16, 14, 12, 11, 10,
                                            8, 6, 5, 4, 2, 0, 28, 27, 25, 24], dtype='int32')
            com_idx = 17
        elif 'ICVL' in self.joint_subset:
            self.joint_subset = np.asarray([34, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10,
                                            8, 6, 4, 2, 0], dtype='int32')
            self.flip_y = True
            com_idx = 34
            cube_size = 350
        else:
            self.joint_subset = np.arange(36)

        self.di = NYUImporter(root, refineNet=None, allJoints=True,
                              com_idx=com_idx, cacheDir='../../cache/')
        if 'synth' in subset:
            self.di.default_cubes[subset] = (cube_size, cube_size, cube_size)
            print(self.di.default_cubes[subset])
        self.Seq = self.di.loadSequence(subset, rng=self.rng, shuffle=True, docom=docom)
        #print(self.Seq.data[0].gt3Dcrop)
        #self.di.showAnnotatedDepth(self.Seq.data[0])
        #print('joint_subset', self.joint_subset)

        # create training data
        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[0].com, 'float32')
        img = np.asarray(self.Seq.data[0].dpt.copy(), 'float32')
        img = normalize(img, com, cube)
        self.hd = HandDetector(img, abs(self.di.fx), abs(self.di.fy), importer=self.di)
        self.num = len(self.Seq.data)
        print(' data loaded with %d samples' % self.num)

    def sample_poses(self):
        train_cube = np.asarray([self.Seq.config['cube']] * self.num, dtype='float32')
        train_com = np.asarray([d.com for d in self.Seq.data], dtype='float32')
        train_gt3D = np.asarray([d.gt3Dcrop for d in self.Seq.data], dtype='float32')
        self.sampled_poses = self.hd.sampleRandomPoses(
            self.di, self.rng, train_gt3D, train_com, train_cube,
            self.num_sample_poses, self.nmax,
            self.aug_modes)  #.reshape((-1, train_gt3D.shape[1]*3))
        self.num = self.sampled_poses.shape[0]
        self.nmax = self.sampled_poses.shape[0]
        print('%d sample poses created!' % self.num)

    def __getitem__(self, i):
        if self.pose_only and self.sampled_poses is not None:
            pos = self.sampled_poses[i][self.joint_subset]
            if self.flip_y:
                pos[:, 1] *= -1
            return pos.flatten()

        cube = np.asarray(self.Seq.config['cube'], 'float32')
        com = np.asarray(self.Seq.data[i].com, 'float32')
        M = np.asarray(self.Seq.data[i].T, dtype='float32')
        gt3D = np.asarray(self.Seq.data[i].gt3Dcrop, dtype='float32')
        img = np.asarray(self.Seq.data[i].dpt.copy(), 'float32')
        img = normalize(img, com, cube)

        if not self.augment:
            if self.joint_subset is not None:
                gt3D = gt3D[self.joint_subset]
            if self.flip_y:
                gt3D[:, 1] *= -1
            if self.pose_only:
                return gt3D.flatten() / (cube[2] / 2.)
            #print(img.shape, gt3D.flatten().shape, com.shape, M.shape, cube.shape)
            return np.expand_dims(img, axis=0), gt3D.flatten() / (cube[2] / 2.), \
                com, M, cube, cube

        img, _, gt3D, cube, com2D, M, _ = augmentCrop(
            img, gt3D, self.di.joint3DToImg(com), cube, M,
            self.aug_modes, self.hd, rng=self.rng)
        if self.joint_subset is not None:
            gt3D = gt3D[self.joint_subset]
        if self.flip_y:
            gt3D[:, 1] *= -1
        if self.pose_only:
            return gt3D.flatten()
        #print(imgD.shape, gt3Dn.flatten().shape, com.shape, M.shape, cube.shape)
        return np.expand_dims(img, axis=0), gt3D.flatten(), \
            self.di.jointImgTo3D(com2D), M, cube, cube

    def set_nmax(self, frac):
        self.nmax = int(self.num * frac)
        print('self.nmax %d' % self.nmax)

    def __len__(self):
        return np.minimum(self.num, self.nmax)
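# --- Added sketch (not in the original code) ---
# dataset_hand_NYU implements the standard torch.utils.data.Dataset protocol
# (__getitem__/__len__), so it can be wrapped in a DataLoader. A minimal
# sketch, where every value in specs is a placeholder for the real config:
from torch.utils.data import DataLoader

specs = {'seed': 23455, 'root': '../data/NYU/', 'subset': 'train',
         'docom': False, 'augment': True, 'sample_poses': 0,
         'joint_subset': 'NYU'}  # placeholders
dataset = dataset_hand_NYU(specs)
loader = DataLoader(dataset, batch_size=128, shuffle=True, num_workers=4)
for img, gt3D, com, M, cube, _ in loader:  # shapes follow __getitem__ above
    break  # feed each batch to the network here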
from data.importers import NYUImporter
from data.transformations import transformPoints2D
from util.handdetector import HandDetector
import numpy as np
from util.preprocess import augmentCrop, norm_dm
import matplotlib.pyplot as plt
import tensorflow as tf

rng = np.random.RandomState(23455)

from netlib.basemodel import basenet2

visual = True

train_root = '/home/dumyy/data/nyu/dataset/'
di_1 = NYUImporter(train_root, cacheDir='../../cache/NYU/',
                   refineNet=None, allJoints=False)
Seq_train = di_1.loadSequence('test', rng=rng, shuffle=False,
                              docom=False, cube=(250, 250, 250))
test_num = len(Seq_train.data)
print test_num

cubes = np.asarray([d.cube for d in Seq_train.data], 'float32')
coms = np.asarray([d.com for d in Seq_train.data], 'float32')
Ms = np.asarray([d.T for d in Seq_train.data], dtype='float32')
gt3Dcrops = np.asarray([d.gt3Dcrop for d in Seq_train.data], dtype='float32')
imgs = np.asarray([d.dpt.copy() for d in Seq_train.data], 'float32')

test_data = np.ones_like(imgs)
    vector_me = chainer.Variable((joints_me - gt).astype('float32'))
    vector_max = chainer.Variable((joints_max - gt).astype('float32'))
    vector_ff = chainer.Variable((joints_ff - gt).astype('float32'))
    euclidian_distance = euclidian_joints(vector_me)
    max_error = max_euclidian_joints(vector_max)
    ff_error = number_frames_within_dist(vector_ff)
    return euclidian_distance, max_error, ff_error


if __name__ == '__main__':
    global gpu_id
    dataset = 'test'
    di = NYUImporter('../../DeepPrior/data/NYU')
    #datadir = '../../DeepPrior/src/data'
    #di = NYUImporter(datadir)
    Seq = di.loadSequence(dataset)
    testSeqs = [Seq]
    testDataset = NYUDataset(testSeqs)

    use_gpu = True
    gpu_id = 0
    if use_gpu:
        xp = cuda.cupy
    else:
        xp = np

    J = 14
    X_test, Y_test = testDataset.imgStackDepthOnly(dataset)
    Y_test = xp.reshape(Y_test, (Y_test.shape[0], Y_test.shape[1] * Y_test.shape[2]))
#!/usr/bin/env python
import _init_paths
import numpy as np
from data.importers import NYUImporter
from data.dataset import NYUDataset
from util.realtimehandposepipeline import RealtimeHandposePipeline

root = '/home/wuyiming/git/Hand'

if __name__ == '__main__':
    di = NYUImporter(root + '/dataset/NYU', cacheDir=root + '/dataset/cache/')
    Seq2 = di.loadSequence('test_1', docom=True)
    testSeqs = [Seq2]
    testDataSet = NYUDataset(testSeqs)
    test_data, test_gt3D = testDataSet.imgStackDepthOnly('test_1')

    config = {'fx': 588., 'fy': 587., 'cube': (300, 300, 300)}
    netPath = root + '/models/NYU/hand_mix/hand_mix.prototxt'
    netWeight = root + '/weights/NYU/hand_mix/hand_mix_iter_100000.caffemodel'
    rtp = RealtimeHandposePipeline(di, config, netPath, netWeight)

    # use filenames
    filenames = []
    for i in testSeqs[0].data:
        filenames.append(i.fileName)
    rtp.processFiles(filenames)