Example #1
import cv2
import numpy as np
from pyKinectTools.utils.SkeletonUtils import display_skeletons
# skel2depth is assumed to be imported at module level elsewhere in
# pyKinectTools; it is not defined in this snippet.


def plotUsers(image, users):
    '''Overlay each tracked user's center of mass and skeleton on the image.'''
    for u in users:
        if not users[u]['tracked']:
            continue
        xyz = users[u]['com']
        uvw = skel2depth(np.array([xyz]), image.shape)[0]
        # Only plot if the projected coordinates are valid (not [0,0,0])
        if uvw[0] > 0 and len(users[u]['jointPositions']) > 0:
            # Mark the center of mass with a small rectangle
            cv2.rectangle(image, tuple([uvw[0] - 3, uvw[1] - 3]),
                          tuple([uvw[0] + 3, uvw[1] + 3]), (4000))
            # Plot the skeleton
            pts = [j for j in users[u]['jointPositions'].values()]
            skel = skel2depth(np.array(pts), image.shape)
            image = display_skeletons(image, skel, color=(image.max(), 0, 0),
                                      skel_type='Kinect')
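
For context, a minimal call might look like the sketch below. The users dict layout is inferred from the keys plotUsers reads ('tracked', 'com', 'jointPositions') and is hypothetical, not taken from the pyKinectTools documentation; it runs only with pyKinectTools available so that skel2depth is in scope.

import numpy as np

# Hypothetical single-user dict with the keys plotUsers expects.
users = {
    1: {'tracked': True,
        'com': [100., 200., 1500.],
        'jointPositions': {'head': [100., 300., 1500.],
                           'torso': [100., 200., 1500.]}}
}
depth_im = np.zeros((240, 320), dtype=np.uint16)  # stand-in depth frame
plotUsers(depth_im, users)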
Example #2
def learn_msr_video(name, offsets_1, offsets_2):
    '''Compute per-pixel features and labels for one MSR Action3D sequence.'''
    depth_file = name + "depth.bin"
    color_file = name + "rgb.avi"
    skeleton_file = name + "skeleton.txt"
    # Read data from each video/sequence
    try:
        depthIms, maskIms = read_MSR_depth_ims(depth_file)
        depthIms *= maskIms  # apply the person mask to the depth frames
        depthIms /= 10       # rescale depth by the dataset factor of 10
        colorIms = read_MSR_color_ims(color_file)
        skels_world, _ = read_MSR_skeletons(skeleton_file)
        skels_world[:, 2] /= 10
    except Exception:
        print "Error getting frame features"
        return -1, -1

    all_features = []
    all_labels = []
    for i in xrange(0, len(depthIms), 25):  # sample every 25th frame
        im_depth = depthIms[i]
        skel_pos = skel2depth(skels_world[i], rez=[240, 320])
        # NOTE: the MSR dataset has a 10 px horizontal offset; uncomment to correct:
        # skel_pos[:,0] -= 10

        features, pixel_labels = learn_frame(im_depth, skel_pos, offsets_1,
                                             offsets_2)
        # Stack features
        all_features.append(features)
        all_labels.append(pixel_labels)

    return np.concatenate(all_features), np.concatenate(all_labels)
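
A hypothetical invocation, assuming MSR Action3D files named with the 'a01_s01_e01_' prefix convention used elsewhere in this file and offsets produced by create_rf_offsets():

offsets_1, offsets_2 = create_rf_offsets()
features, labels = learn_msr_video('a01_s01_e01_', offsets_1, offsets_2)
if features is not -1:
    print "Collected {0:d} feature vectors".format(len(features))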
Example #3
def transform_skels(skels, transformation, output='image'):
    '''
    ---Parameters---
    skels : list of skeletons in camera 1 coordinates
    transformation : 4x4 homogeneous transform from camera 1 to camera 2
    output : 'image' or 'world' for the output coordinate system
    ---Result---
    skels_out : skeletons in camera 2 coordinates
    '''
    skels_out = []
    for skel_c1 in skels:
        if np.all(skel_c1 != -1):
            skels_mask = skel_c1 == 0  # remember untracked (all-zero) joints
            # Convert to depth coordinate system
            skel_c1 = depth2world(skel2depth(skel_c1, [240, 320]), [240, 320])
            # Transform from cam1 -> cam2
            skel_c2 = np.dot(transformation[:3, :3],
                             skel_c1.T).T + transformation[:3, 3]

            if len(skel_c2) != N_MSR_JOINTS:
                skel_c2 = kinect_to_msr_skel(skel_c2)

            skel_c2[skels_mask] = 0

            if output == 'world':
                skels_out += [skel_c2]
            elif output == 'image':
                # Get skel in image (cam2) coordinates
                skel_im2 = world2depth(skel_c2, [240, 320])
                skels_out += [skel_im2]

    return skels_out
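
The cam1 -> cam2 step above is a standard rigid-body transform. A minimal self-contained sketch of that one operation, using synthetic data (not from pyKinectTools):

import numpy as np

# 4x4 homogeneous transform: 90-degree rotation about Z plus a translation.
T = np.eye(4)
T[:3, :3] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
T[:3, 3] = [100., 0., 0.]

skel_c1 = np.array([[10., 20., 1500.],
                    [30., 40., 1520.]])  # N x 3 joint positions
# Same operation as in transform_skels: rotate, then translate.
skel_c2 = np.dot(T[:3, :3], skel_c1.T).T + T[:3, 3]
print skel_c2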
Example #4
def main(visualize=True):
    n_cameras = 1
    cam = KinectPlayer(base_dir='./',
                       device=1,
                       bg_subtraction=True,
                       get_depth=True,
                       get_color=True,
                       get_skeleton=False,
                       fill_images=False)
    if n_cameras == 2:
        cam2 = KinectPlayer(base_dir='./',
                            device=2,
                            bg_subtraction=True,
                            get_depth=True,
                            get_color=True,  # mirror the first camera's settings
                            get_skeleton=False,
                            fill_images=False)
    # Transformation matrix from first to second camera
    try:
        data = pickle.load(open("Registration.dat", 'r'))
        transform_c1_to_c2 = data['transform']
        transform = True
    except Exception:
        transform = False

    current_frame = 0
    all_joint_ims_z = []
    all_joint_ims_c = []
    framerate = 1
    while cam.next():
        # print "Frame ", current_frame
        # Update frames
        if n_cameras == 2:
            cam2.next()
            # cam2.sync_cameras(cam)
        current_frame += 1
        if current_frame % framerate != 0:
            continue

        # Transform skels from cam1 to cam2
        # cam_skels = [np.array(cam.users[s]['jointPositions'].values()) for s in cam.users.keys()]
        # cam_skels = [np.array(cam.users[s]['jointPositions'].values()) for s in cam.users]
        # Get rid of bad skels
        # cam_skels = [s for s in cam_skels if np.all(s[0] != -1)]

        # if len(cam_skels) == 0:
        # 	continue

        # Save images (disabled; requires the commented-out cam_skels above)
        if 0:
            joint_ims_z = []
            joint_ims_c = []
            dx = 10
            skel_tmp = skel2depth(cam_skels[0], [240, 320])
            for j_pos in skel_tmp:
                # embed()
                joint_ims_z += [
                    cam.depthIm[j_pos[0] - dx:j_pos[0] + dx,
                                j_pos[1] - dx:j_pos[1] + dx]
                ]
                joint_ims_c += [
                    cam.colorIm[j_pos[0] - dx:j_pos[0] + dx,
                                j_pos[1] - dx:j_pos[1] + dx]
                ]
            if len(joint_ims_z) > 0:
                all_joint_ims_z += [joint_ims_z]
                all_joint_ims_c += [joint_ims_c]

        if 1:
            # if transform:
            # cam2_skels = transform_skels(cam_skels, transform_c1_to_c2, 'image')

            # try:
            # depth = cam2.get_person()
            # if learn:
            # 	rf.add_frame(depth, cam2_skels[0])
            # else:
            # 	rf.infer_pose(depth)
            # except:
            # pass
            if visualize:
                # cam2.depthIm = display_skeletons(cam2.depthIm, cam2_skels[0], (5000,), skel_type='Low')
                # skel1 = kinect_to_msr_skel(skel2depth(cam_skels[0], [240,320]))
                # cam.depthIm = display_skeletons(cam.depthIm, skel1, (5000,), skel_type='Low')
                # embed()
                cam.visualize(color=True,
                              depth=True,
                              text=True,
                              colorize=True,
                              depth_bounds=[500, 3500])
                if n_cameras == 2:
                    cam2.visualize(color=True, depth=True)

    embed()

    print 'Done'
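
The decimation logic in the loop above (process only every framerate-th frame) is independent of the Kinect. A minimal sketch with a stand-in frame source; the FrameSource class is hypothetical, for illustration only:

class FrameSource:
    '''Stand-in for KinectPlayer: next() advances and reports success.'''
    def __init__(self, n_frames):
        self.n_frames = n_frames
        self.i = 0
    def next(self):
        self.i += 1
        return self.i <= self.n_frames

cam = FrameSource(20)
framerate = 5  # process every 5th frame
current_frame = 0
while cam.next():
    current_frame += 1
    if current_frame % framerate != 0:
        continue
    print "Processing frame", current_frame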
Example #5
def main_infer(rf_name=None):
    '''Load a trained random forest and infer skeleton poses per frame.'''

    if rf_name is None:
        import os
        # Saved filenames are timestamped, so the lexicographic max is newest.
        files = sorted(os.listdir('Saved_Params/'))
        rf_name = 'Saved_Params/' + files[-1]
        print "Classifier file:", rf_name

    # Load classifier data
    data = pickle.load(open(rf_name))
    rf = data['rf']
    offsets_1, offsets_2 = data['offsets']
    # Read data from each video/sequence
    depthIms = []
    skels_world = []
    if 1:
        # VP = KinectPlayer(base_dir='/Users/colin/Data/Office_25Feb2013/', device=2, get_depth=True, get_color=False, get_skeleton=True, get_mask=False)
        VP = KinectPlayer(base_dir='/Users/colin/Data/Room_close_26Feb13/',
                          device=1,
                          get_depth=True,
                          get_color=False,
                          get_skeleton=True,
                          get_mask=False)
        _, _ = VP.get_n_skeletons(50)  # discard the first 50 frames
        depthIms, skels_world = VP.get_n_skeletons(100)
        # depthIms = np.array(depthIms)[:,:,::-1]

    else:
        # name = 'a01_s01_e02_'
        name = 'a01_s10_e02_'
        # name = 'a02_s06_e02_'
        # name = 'a05_s02_e02_'
        depth_file = name + "depth.bin"
        color_file = name + "rgb.avi"
        skeleton_file = name + "skeleton.txt"

        try:
            depthIms, maskIms = read_MSR_depth_ims(depth_file)
            depthIms *= maskIms
            depthIms /= 10
            # colorIms = read_MSR_color_ims(color_file)
            skels_world, skels_im = read_MSR_skeletons(skeleton_file)
            skels_world[:, 2] /= 10
        except Exception:
            print "Error reading data"
    # Process data
    all_pred_ims = []
    for i in xrange(1, len(depthIms), 1):
        # try:
        if 1:
            print i
            # Get frame data
            im_depth = depthIms[i]
            # im_depth[160:, :] = 0
            skel_pos = skel2depth(skels_world[i], rez=[240, 320])
            # NOTE: the MSR dataset skeletons have a 10 px horizontal offset;
            # uncomment to correct it:
            # skel_pos[:,0] -= 10

            skel_pos_pred, im_predict = infer_pose(im_depth, rf, offsets_1,
                                                   offsets_2)

            # Overlay skeletons
            if 1:
                # colorIm = colorIms[i]
                # im_predict = colorIm
                cv2.imshow("forest_prediction",
                           im_predict / float(im_predict.max()))

                im_predict = np.repeat(im_depth[:, :, None].astype(np.float),
                                       3, -1)
                # embed()
                im_predict[im_depth > 0] -= im_depth[im_depth > 0].min()
                im_predict /= float(im_predict.max() / 255.)
                im_predict = im_predict.astype(np.uint8)

                # im_predict = np.repeat(im_predict[:,:,None], 3, -1)
                # im_predict /= float(im_predict.max())*255
                # im_predict = im_predict.astype(np.uint8)
                im_predict = display_skeletons(im_predict, skel_pos,
                                               (255, 0, 0), SKEL_DISPLAY_MODE)
                im_predict = display_skeletons(im_predict, skel_pos_pred,
                                               (0, 255, 0), SKEL_DISPLAY_MODE)

                # embed()
                # max_ = (im_predict * (im_predict < 255)).max()

            all_pred_ims += [im_predict]
            # Visualize
            if 1:
                cv2.putText(im_predict, "Blue=Truth", (10, 210),
                            cv2.FONT_HERSHEY_DUPLEX, .5,
                            (int(im_predict.max() / 2), 0, 0))
                cv2.putText(im_predict, "Green=Predict", (10, 230),
                            cv2.FONT_HERSHEY_DUPLEX, .5,
                            (0, int(im_predict.max() / 2), 0))
                cv2.imshow("prediction", im_predict)

                ret = cv2.waitKey(10)
                if ret > 0: break

                time.sleep(.5)

        # except:
        # 	print "Frame failed:", i
        # 	break

    embed()
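
main_infer() can also be pointed at a specific saved classifier instead of the newest file in Saved_Params/; the path below is purely illustrative:

main_infer(rf_name='Saved_Params/example_forests.dat')  # hypothetical file name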
Example #6
def main_learn():
    '''Collect per-frame features and train a random-forest classifier.'''

    offsets_1, offsets_2 = create_rf_offsets()
    all_features = []
    all_labels = []
    # My data
    if 1:
        # Data paths are machine-specific; point base_dir at your own recording.
        VP = KinectPlayer(base_dir='/Users/colin/Data/Room_close_26Feb13/',
                          device=1, get_depth=True, get_color=False,
                          get_skeleton=True, get_mask=False)

        # Optionally discard the first 50 frames:
        # depthIms, skels_world = VP.get_n_skeletons(50)
        depthIms, skels_world = VP.get_n_skeletons(400)
        # depthIms, skels_world = VP.get_n_skeletons(-1)
        print "{0:d} frames loaded".format(len(depthIms))
        # for i in range(0, len(depthIms), 1):
        for i in range(0, 40):
            # try:
            if 1:
                print "Frame", i
                depthIms, skels_world = VP.get_n_skeletons(10)
                if depthIms == -1:
                    break
                depth = depthIms[0]
                skel_w = skels_world[0]
                # depth = depthIms[i]
                # skel_w = skels_world[i]
                skel_im = skel2depth(skel_w, rez=[240, 320])
                features, pixel_labels = learn_frame(depth, skel_im, offsets_1,
                                                     offsets_2)

                all_features += [features]
                all_labels += [pixel_labels]
            # except Exception:
            #     print "Error in frame {0:d}".format(i)
        del depthIms, skels_world
    # MSR data (disabled)
    if 0:
        names = create_MSR_filenames(
            np.arange(1) + 1,
            np.arange(3, 5) + 1, [2])
        # Parallelize feature collection
        from joblib import Parallel, delayed
        if 1:
            for n_set in chunks(names, 1):
                print "Computing with one thread. Current feature count:", len(
                    all_features)
                all_data = learn_msr_video(n_set[0], offsets_1, offsets_2)
                if all_features == []:
                    all_features = all_data[0]
                    all_labels = all_data[1]
                else:
                    all_features = np.vstack([all_features, all_data[0]])
                    all_labels = np.hstack([all_labels, all_data[1]])

        else:
            for n_set in chunks(names, 100):
                print "Computing with multiple threads. Current feature count:", len(
                    all_features)
                all_data = Parallel(n_jobs=-1, verbose=True, pre_dispatch=2)(
                    delayed(learn_frame)(n, offsets_1, offsets_2)
                    for n in n_set)
                # Account for bad frames
                all_features += [f[0] for f in all_data if f[0] is not -1]
                all_labels += [f[1] for f in all_data if f[1] is not -1]
                print "Done computing this set of features/labels"

    print "Done computing all features/labels"
    all_features = np.vstack(all_features)
    all_labels = np.hstack(all_labels)

    print "Starting forest"
    rf = RFClassifier(n_estimators=6,
                      criterion='entropy',
                      max_depth=20,
                      max_features='auto',
                      oob_score=False,
                      n_jobs=-1,
                      random_state=None,
                      verbose=1,
                      min_samples_leaf=1)

    rf.fit(all_features, all_labels)

    save_data("Saved_Params/" + str(time.time()) + "_forests.dat", {
        'rf': rf,
        'offsets': [offsets_1, offsets_2]
    })
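
RFClassifier is not defined in this snippet; judging by its parameters it is likely an alias for sklearn.ensemble.RandomForestClassifier. A self-contained sketch under that assumption, with synthetic stand-in data:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(1000, 300)        # stand-in feature vectors
y = np.random.randint(0, 15, 1000)   # stand-in per-pixel joint labels
rf = RandomForestClassifier(n_estimators=6, criterion='entropy',
                            max_depth=20, n_jobs=-1, verbose=1)
rf.fit(X, y)
print rf.predict(X[:5])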
Example #7
def main(visualize=False, learn=False):
    # Init both cameras
    # fill = True
    fill = False
    get_color = True
    cam = KinectPlayer(base_dir='./',
                       device=1,
                       bg_subtraction=True,
                       get_depth=True,
                       get_color=get_color,
                       get_skeleton=True,
                       fill_images=fill)
    cam2 = KinectPlayer(base_dir='./',
                        device=2,
                        bg_subtraction=True,
                        get_depth=True,
                        get_color=get_color,
                        get_skeleton=True,
                        fill_images=fill)
    # Transformation matrix from first to second camera
    data = pickle.load(open("Registration.dat", 'r'))
    transform_c1_to_c2 = data['transform']

    # Get random forest parameters
    if learn:
        rf = RFPose(offset_max=100, feature_count=300)
    else:
        rf = RFPose()
        rf.load_forest()

    ii = 0
    # cam.next(200)
    all_joint_ims_z = []
    all_joint_ims_c = []
    while cam.next() and ii < 200:
        # Update frames
        cam2.sync_cameras(cam)
        if ii % 10 != 0:
            ii += 1
            continue

        # Transform skels from cam1 to cam2
        cam_skels = [
            np.array(cam.users[s]['jointPositions'].values())
            for s in cam.users
        ]
        # Get rid of bad skels
        cam_skels = [s for s in cam_skels if np.all(s[0] != -1)]

        if len(cam_skels) == 0:
            continue
        ii += 1

        # Save images
        if 1:
            joint_ims_z = []
            joint_ims_c = []
            dx = 10
            skel_tmp = skel2depth(cam_skels[0], [240, 320])
            for j_pos in skel_tmp:
                # embed()
                joint_ims_z += [
                    cam.depthIm[j_pos[0] - dx:j_pos[0] + dx,
                                j_pos[1] - dx:j_pos[1] + dx]
                ]
                joint_ims_c += [
                    cam.colorIm[j_pos[0] - dx:j_pos[0] + dx,
                                j_pos[1] - dx:j_pos[1] + dx]
                ]
            if len(joint_ims_z) > 0:
                all_joint_ims_z += [joint_ims_z]
                all_joint_ims_c += [joint_ims_c]

        if 0:
            cam2_skels = transform_skels(cam_skels, transform_c1_to_c2,
                                         'image')

            try:
                depth = cam2.get_person()
                if learn:
                    rf.add_frame(depth, cam2_skels[0])
                else:
                    rf.infer_pose(depth)

                if visualize:
                    cam2.depthIm = display_skeletons(cam2.depthIm,
                                                     cam2_skels[0], (5000, ),
                                                     skel_type='Low')
                    skel1 = kinect_to_msr_skel(
                        skel2depth(cam_skels[0], [240, 320]))
                    cam.depthIm = display_skeletons(cam.depthIm,
                                                    skel1, (5000, ),
                                                    skel_type='Low')
                    cam.visualize()
                    cam2.visualize()
            except Exception:
                pass  # skip frames where person extraction or inference fails

    embed()
    if learn:
        print "Starting forest"
        rf.learn_forest()

    print 'Done'
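
Typical invocations, assuming recorded device directories under ./ and a Registration.dat produced by a prior calibration step:

main(visualize=True, learn=True)    # collect frames and train the forest
main(visualize=True, learn=False)   # load a saved forest and infer poses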