Example #1
def toggle_unreal():
	"""Start or stop the TIS streaming server and keep the streaming_TIS UI flag in sync."""
	global g_TIS_server
	streaming_TIS = State.getKey('/root/ui/attrs/streaming_TIS')
	if streaming_TIS:
		# server is running: stop it and clear the UI flag
		g_TIS_server.Stop()
		State._setKey('/root/ui/attrs/streaming_TIS', False)
		print('Stopping Server')
	else:
		print('Trying to start Server')
		if g_TIS_server.Start('', 6500): # start the server on port 6500
			State._setKey('/root/ui/attrs/streaming_TIS', True)
		else:
			print('Failed to start server')
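The toggle above keeps the UI flag '/root/ui/attrs/streaming_TIS' in lockstep with the server lifecycle, so later callbacks can consult the flag rather than the server object. Below is a minimal, self-contained sketch of the same pattern; MockServer and the plain dict are hypothetical stand-ins for g_TIS_server and the State module, not the real API.

class MockServer(object):
	def __init__(self):
		self.running = False
	def Start(self, host, port):
		# a real server might fail to bind here and return False
		self.running = True
		return True
	def Stop(self):
		self.running = False

def toggle(server, state):
	# flip the server and the flag together, exactly as toggle_unreal does
	if state['streaming']:
		server.Stop()
		state['streaming'] = False
	else:
		if server.Start('', 6500):
			state['streaming'] = True

state, server = {'streaming': False}, MockServer()
toggle(server, state)  # starts the server; state['streaming'] becomes True
toggle(server, state)  # stops it again; state['streaming'] returns to False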
Example #2
def rbfn_view_cb(fi, attrs):
	# g_mode = 1
	global g_rbfn
	group,gn,pn,slider_indices,slider_names,pose_splits = rbfn_info_from_frame(fi)

	QApp.view().displayText = [(10, 100, gn), (10,125, pn)]
	img = group['images'][pn]
	img = JPEG.decompress(img)
	h,wm = img.shape[0]*0.5, img.shape[1]*0.5 # half-height and half-width of the image (pixels)

	out_shape = extract_x2ds(group, pn, g_rbfn['marker_names'])
	
	svs = group['slider_data'][pn][slider_indices]
	State._setKey('/root/sliders/attrs', dict(zip(slider_names, svs))) # NO UNDO

	# compensate for roll, translation and scale
	norm_shape, head_pan, head_tilt, A = stabilize_shape(out_shape)

	# extract head angles (degrees) from the measured values; mirroring flips pan and roll
	mirror_scale = -1 if attrs['mirroring'] else 1
	new_pose = np.degrees(np.arctan2([head_pan*mirror_scale, head_tilt, -mirror_scale*A[1][0]], [2, 2, A[1][1]]))
	
	head_roll = -np.arctan2(A[1][0], A[1][1]) # roll recovered from the stabilising transform A
	head_pan = np.arctan2(head_pan, 2.0)
	head_tilt = np.arctan2(head_tilt, 2.0)
	#print head_roll, head_pan, head_tilt

	slider_names, slider_values = applyRetarget(g_rbfn, norm_shape)
	svs[np.where(svs < 1e-4)] = 0
	slider_values[np.where(slider_values < 1e-4)] = 0
	#print zip(slider_values,svs)
	# return the stored slider data (plus neck angles in degrees); the retargeted values
	# are only kept for the commented-out comparison above
	slider_names.extend(['NeckRoll','NeckPan','NeckTilt'])
	svs = np.clip(svs, 0, 1)
	slider_values = np.float32(list(svs) + list(np.degrees([head_roll, head_pan, head_tilt])))

	return new_pose,out_shape,norm_shape,img,slider_names,slider_values,A
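The angle extraction above assumes A is the 2x2 linear part of the transform returned by stabilize_shape, so roll can be read off its second row with arctan2, while pan and tilt come from the measured offsets compared against a constant of 2. A small illustrative check on a synthetic rotation-plus-scale matrix; the helper and the test values are hypothetical, not part of the original code.

import numpy as np

def head_angles(A, head_pan, head_tilt):
	# same decomposition as rbfn_view_cb: roll from the matrix, pan/tilt from the offsets
	roll = -np.arctan2(A[1][0], A[1][1])
	pan = np.arctan2(head_pan, 2.0)
	tilt = np.arctan2(head_tilt, 2.0)
	return np.degrees([roll, pan, tilt])

theta, scale = np.radians(12.0), 0.8  # synthetic roll of 12 degrees, uniform scale
A = scale * np.float32([[np.cos(theta), -np.sin(theta)],
                        [np.sin(theta),  np.cos(theta)]])
print(head_angles(A, head_pan=0.3, head_tilt=-0.1))  # roll comes back as about -12 degrees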
Example #3
def setFrame_cb(fi):
	attrs = State.getKey('/root/ui/attrs/')
	global g_setting_frame
	if g_setting_frame: return
	g_setting_frame = True
	try: # within this loop we handle the timeline, which could trigger calling this function recursively
		global g_mode, g_frame, g_TIS_server, g_neutral_corrective_shape
		global g_smooth_pose
		view = QApp.view()
		cid = view.cameraIndex()
		if cid != g_mode: # deal with changing modes
			g_mode = cid
			if g_mode == 0:
				if g_md is not None: QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
			elif g_mode == 1:
				pose_splits = rbfn_pose_splits()
				QApp.app.qtimeline.setRange(0, pose_splits[-1]-1)
			new_frame = g_frame.get(g_mode, fi) # restore the frame last viewed in this mode
			if new_frame != fi:
				QApp.app.qtimeline.frame = new_frame
				fi = new_frame
	except Exception as e:
		print('exc setFrame', e)
	g_setting_frame = False
	g_frame[g_mode] = fi
	
	if not attrs['setting_neutral']: g_neutral_corrective_shape = 0
	
	# dispatch on the current camera mode: 0 = tracking view, 1 = RBFN view
	new_pose,new_shape,norm_shape,img,slider_names,slider_values,A = [track_view_cb,rbfn_view_cb][g_mode](fi,attrs)

	mirror_scale = -1 if attrs['mirroring'] else 1
	h,wm = img.shape[0]*0.5, img.shape[1]*0.5*mirror_scale # half-height and signed half-width of the image

	geo_vs = np.zeros((new_shape.shape[0],3), dtype=np.float32)
	if attrs['debugging']: # display the stabilised data
		geo_vs[:,:2] = norm_shape
		geo_vs *= 200
		geo_vs[:,:2] += np.int32(np.mean(new_shape, axis=0)/200)*200 # re-centre near the tracked shape, snapped to a 200px grid
	else: # display the tracking data
		geo_vs[:,:2] = new_shape

	geo_mesh,image_mesh,bs_mesh = QApp.app.getLayers(['geo_mesh', 'image_mesh', 'bs_mesh'])
	
	bs_mesh.visible = attrs['show_harpy']
	if bs_mesh.visible:
		global g_bs_vs, g_bs_shape_mat_T
		bs_mesh.setVs(g_bs_vs + np.dot(g_bs_shape_mat_T, np.clip(slider_values[:-3],0,1))) # blendshape deltas weighted by the face sliders (neck channels excluded)
		# compute the Harpy position
		R = Calibrate.composeR(new_pose*[1,-1,-1])
		if g_mode == 1: R = np.eye(3) # TODO
		bs_ts = Calibrate.composeRT(R,[0,1720,0],0) # compensate for the offset of the Harpy (temples ~1720mm above origin)
		scale = 1.0/np.linalg.norm(160.*A) # IPD (64mm) / 0.4 (ref_shape) = 160.
		off = np.mean(new_shape[[0,16]],axis=0) # get the position of the temples (pixels)
		g_smooth_pose[g_mode] = filter_data(np.float32([scale,off[0],off[1]]), g_smooth_pose.setdefault(g_mode,None), 10.0)
		pose = g_smooth_pose[g_mode]
		bs_ts[:3] *= pose[0]
		bs_ts[:3,3] += [pose[1]-abs(wm),1000+pose[2]-h,0]
		# offset screen-right 300mm
		bs_ts[:3,3] += (pose[0]*attrs['harpy_xoffset'])*np.float32([np.cos(np.radians(view.camera.cameraRoll)),-np.sin(np.radians(view.camera.cameraRoll)),0.0])
		bs_mesh.transforms[0] = bs_ts.T
	
	geo_mesh.setVs(geo_vs)
	geo_mesh.colour = [0,1,0,1] if attrs['streaming_TIS'] else [1,0,0,1] # green while streaming, red otherwise
	geo_mesh.transforms[0][:,:3] = [[mirror_scale,0,0],[0,1,0],[0,0,1],[-wm,1000-h,0.1]]
	image_mesh.setVs(np.float32([[-wm,-h,0],[wm,-h,0],[wm,h,0],[-wm,h,0]]))
	image_mesh.setImage(img)
	if attrs['unreal']:
		if not attrs['streaming_TIS']: toggle_unreal()
		ret, activeConnections = g_TIS_server.WriteAll(PyTISStream.getBlendshapeData(slider_names, slider_values))
		if not ret:
			print('Server is not Initialised')
			State._setKey('/root/ui/attrs/streaming_TIS', False)
	else:
		# Turn off streaming
		if attrs['streaming_TIS']: toggle_unreal()
	QApp.app.updateGL()
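setFrame_cb changes the timeline range and frame from inside the frame callback itself, so it protects against re-entry with the g_setting_frame flag. Below is a minimal sketch of that guard, with a hypothetical FakeTimeline standing in for QApp.app.qtimeline and a try/finally so the flag is always cleared even if the body raises.

g_setting_frame = False

def set_frame(fi, timeline):
	global g_setting_frame
	if g_setting_frame:
		return  # already inside the callback; ignore the recursive call
	g_setting_frame = True
	try:
		# changing the frame here fires the callback again; the guard makes it a no-op
		timeline.frame = fi
	finally:
		g_setting_frame = False

class FakeTimeline(object):
	# stand-in for the timeline widget: assigning .frame re-enters set_frame
	def __init__(self):
		self._frame = 0
	@property
	def frame(self):
		return self._frame
	@frame.setter
	def frame(self, fi):
		self._frame = fi
		set_frame(fi, self)

set_frame(5, FakeTimeline())  # completes without infinite recursion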