Code example #1
Score: 0
File: FaceTrack.py  Project: davidsoncolin/IMS
def set_frame_cb2(frame):
	"""Frame callback: detect/track a face in the given frame and refresh the GL view.

	Loads the frame image (from a jpeg sequence or the open movie, depending on
	the hard-coded `if 0:` switches), optionally applies one of several disabled
	preprocessing steps, runs the face tracker -- booting it from the detector
	whenever no shape is cached from the previous frame -- and finally pushes
	the tracked geometry and the image into the QApp display layers.

	:param frame: integer frame index to seek to and track.
	Side effects: mutates module globals (g_prev_vs, md) and the QApp layers.
	"""
	global g_predictor, g_predictor_dlib, g_detector
	# Tracked landmark count plus 4 synthetic boundary vertices (see get_boundary below).
	size = (len(g_predictor['ref_shape'])+4)
	geo_vs = np.zeros((size,3), dtype=np.float32)

	# Lazily create the previous-shape cache used to seed the tracker.
	global g_prev_vs
	try: g_prev_vs
	except NameError: g_prev_vs = None  # fixed: a bare `except` here would also hide unrelated errors

	if 0: # show_images: step through a jpeg sequence instead of the movie
		global g_jpgs; fn = g_jpgs[frame%len(g_jpgs)]
		img = Face.load_image(fn)
		img = Face.fix_image(img, max_size=640)
		use_prev_vs = False # images need booting every frame
	else: # show_movies
		global md; MovieReader.readFrame(md, seekFrame=frame) # only update the visible camera
		img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
		use_prev_vs = True

	if 0: # undistort_stuff (disabled): render the frame through the undistortion shader
		global g_screen
		global g_tid, g_bid
		g_tid,g_bid = Opengl.bind_streaming_image(img, g_tid, g_bid)
		img = Opengl.renderGL(img.shape[1], img.shape[0], Opengl.quad_render, (g_tid, g_screen, 0.85))
		#Opengl.unbind_image(bid)

	if 0: # rotated_image (disabled)
		img = img.transpose((1,0,2)).copy()
	if 0: # gamma_image (disabled): apply a 1/2.2 gamma via a uint8 lookup table
		lookup = np.array([int(((x/255.0)**0.4545)*255.0) for x in range(256)], dtype=np.uint8)
		img = lookup[img]
	#img[:,600:1000] = 0 #img[:,200:600].copy()
	if 0: # test_rotate (disabled)
		import scipy; img = scipy.misc.imrotate(img, frame, interp='bilinear')
	if 0: # test_rotate_right (disabled): rotate only the right-hand square of the frame
		import scipy; img[:,-img.shape[0]:] = scipy.misc.imrotate(img[:,-img.shape[0]:], frame, interp='bilinear')
	if 0: # test_filter_image (disabled)
		img = ISCV.filter_image(img,4,16)

	# Half-extents used to centre the image quad at the origin.
	w,h = img.shape[1]*0.5,img.shape[0]*0.5

	# Seed the tracker from last frame's shape; fall back to the detector.
	boot = g_prev_vs
	if boot is None: boot = Face.detect_face(img, g_predictor, 2) # ,-1) # put -1 at end to boot at any angle
	tmp = Face.track_face(img, g_predictor, boot)
	if use_prev_vs and boot is not None: g_prev_vs = tmp
	# Drop the cache on the first frame or when the tracker looks lost, forcing a re-detect.
	if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
	global template_vs
	geo_vs[:size-4,:2] = tmp
	geo_vs[size-4:size,:2] = Face.get_boundary(geo_vs[:size-4,:2], template_vs)

	if 0: # show_aam (disabled): fit and overlay the active appearance model
		global g_aam_model
		shape_u, tex_u, A_inv, mn  = Face.fit_aam(g_aam_model, tmp, img)
		Face.render_aam(g_aam_model, A_inv*0.1, mn*0.1, shape_u, tex_u, img)
		su,tu = Face.normalized_aam_coords(g_aam_model, shape_u, tex_u)
		res = Face.aam_residual(g_aam_model, tmp, img)
		QApp.view().displayText = [(10,100,'%f' % np.linalg.norm(tu)),(10,125,'%f' % np.linalg.norm(su)),(10,150,'%f'%res)]

	if 0: # show_extracted_texture (disabled)
		global g_aam_model_indices,g_aam_model_weights
		pixels = Face.extract_texture(img, geo_vs[:size,:2], g_aam_model_indices, g_aam_model_weights)
		Face.render_texture(pixels, img, template_vs, g_aam_model_indices, g_aam_model_weights)

	# Push geometry + image into the display layers and repaint.
	geo_mesh = QApp.app.getLayer('geo_mesh')
	geo_mesh.setVs(geo_vs)
	geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[-w,1000-h,0.1]]
	image_mesh = QApp.app.getLayer('image_mesh')
	image_mesh.setVs(np.array([[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]], dtype=np.float32))
	image_mesh.setImage(img)
	QApp.view().updateGL()
Code example #2
Score: 0
def filter_movie_frame(img, small_blur, large_blur):
    """Band-pass filter a movie frame via ISCV.filter_image.

    Clamps the two blur radii so that 0 <= small_blur < large_blur < mx,
    where mx is half the smaller image dimension, before handing them to
    the C-side filter.

    :param img: image array; only img.shape[0]/[1] are inspected here.
    :param small_blur: requested lower blur radius (pixels).
    :param large_blur: requested upper blur radius (pixels).
    :return: the filtered image from ISCV.filter_image.
    """
    # Floor division keeps the clamp bound integral: under Python 3 the
    # original `/ 2` yields a float, which would feed float radii to the
    # native filter. Python 2 int division already floored, so behaviour
    # there is unchanged.
    mx = min(img.shape[0], img.shape[1]) // 2
    large_blur = min(max(large_blur, 1), mx - 1)
    small_blur = min(max(small_blur, 0), large_blur - 1)
    filtered_img = ISCV.filter_image(img, small_blur, large_blur)
    return filtered_img