Example #1
	def RT(self, upright=False):
		'''Returns the extrinsic matrix [R T] for the view.'''
		return Calibrate.composeRT(self.R(upright),self.cameraT,self.cameraInterest)
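A minimal sketch of composing such an extrinsic [R | T] matrix (this is an assumption about what Calibrate.composeRT does; its cameraInterest argument is ignored here):

import numpy as np

def compose_RT_sketch(R, T):
	'''Hypothetical helper: stack a 3x3 rotation R and a 3-vector translation T
	into the 3x4 extrinsic matrix [R | T].'''
	RT = np.zeros((3, 4), dtype=np.float32)
	RT[:3, :3] = R  # rotation block
	RT[:3, 3] = T   # translation column
	return RT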
Example #2
def main():
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands
    import IO
    import sys, os
    deinterlacing = False
    detectingWands = False
    detectingTiara = False
    dot_detections = None
    detections_filename = None
    frame_offsets = None
    firstFrame, lastFrame = 0, 5000
    drawDotSize = 4.0
    fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = \
        50., (0, 0), (0, 0, 0), (0, 1250, 0), (0, 0)
    mats = []
    grip_directory = os.environ['GRIP_DATA']

    if 0:
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 37.9, (0, 0), (
            -66.0, 3.5, -0.2), (4850, 1330, 3280), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
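        # mat layout: [K (3x3 intrinsics), RT (3x4 extrinsics), P = np.dot(K, RT) (3x4),
        #              distortion coeffs, camera position in world space (-R^T T), [width, height]]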
        mat0 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 55.8, (0, 0), (
            -103.6, 3.5, -0.3), (2980, 1380, -2180), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat1 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 49.3, (0, 0), (
            27.9, 4.0, -0.2), (-5340, 1150, 5030), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat2 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 50.6, (0, 0), (
            -156.6, 4.9, 0.2), (-105, 1400, -4430), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat3 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        mats = [mat0, mat1, mat2, mat3]
        xcp_filename = '154535_Cal168_Floor_Final.xcp'
        directory = os.path.join(grip_directory, 'REFRAME')
        movieFilenames = [
            '001E0827_01.MP4', '001F0813_01.MP4', '001G0922_01.MP4',
            '001H0191_01.MP4'
        ]
        #mats,movieFilenames = mats[:1],movieFilenames[:1] # restrict to single-view
        frame_offsets = [119 + 160, 260, 339, 161]
        small_blur, large_blur = 1, 25
        min_dot_size = 1.0
        max_dot_size = 20.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 250, 250  #135,135
    elif 0:
        xcp_filename = '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_01.xcp'
        detections_filename = 'detections.dat'
        detectingTiara = True
        pan_tilt_roll = (0, 0, 90)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], 'ted')
        movieFilenames = [
            '201401211653-4Pico-32_Quad_Dialogue_01_%d.mpg' % xi
            for xi in range(1)
        ]
        firstFrame = 511
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 0, 170
    elif 1:
        xcp_filename = '50_Grip_RoomCont_AA_02.xcp'
        detections_filename = 'detections.dat'
        pan_tilt_roll = (0, 0, 0)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], '151110')
        movieFilenames = ['50_Grip_RoomCont_AA_02.v2.mov']
        firstFrame = 0
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 170, 170

    attrs = {
        'small_blur': small_blur, 'large_blur': large_blur,
        'threshold_bright': threshold_bright, 'threshold_dark_inv': threshold_dark_inv,
        'circularity_threshold': circularity_threshold,
        'min_dot_size': min_dot_size, 'max_dot_size': max_dot_size
    }

    primitives2D = QGLViewer.makePrimitives2D(([], []), ([], []))
    primitives = []
    if len(movieFilenames) == 1:
        # TODO: time_base, timecode
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mats = [[
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]]
        camera_ids = ['video']
        movies = [
            MovieReader.open_file(os.path.join(directory, movieFilenames[0]),
                                  audio=False)
        ]
    else:  # hard coded cameras
        if xcp_filename.endswith('.xcp'):
            if detectingTiara:  # gruffalo
                c3d_filename = os.path.join(
                    directory,
                    '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
                from IO import C3D
                c3d_dict = C3D.read(c3d_filename)
                global c3d_frames
                c3d_frames = c3d_dict['frames']
                c3d_fps, c3d_labels = c3d_dict['fps'], c3d_dict['labels']
                c3d_subject = ''  #'TedFace'
                which = np.where(
                    [s.startswith(c3d_subject) for s in c3d_labels])[0]
                c3d_frames = c3d_frames[:, which, :]
                c3d_labels = [c3d_labels[i] for i in which]
                print len(c3d_frames)
            xcp, xcp_data = ViconReader.loadXCP(
                os.path.join(directory, xcp_filename))
            mats.extend(xcp)
        elif xcp_filename.endswith('.cal'):
            from IO import OptitrackReader
            xcp, xcp_data = OptitrackReader.load_CAL(
                os.path.join(directory, xcp_filename))
            mats = xcp
            print 'mats', len(mats), len(movieFilenames)
            assert (len(mats) == len(movieFilenames))
        camera_ids = []
        movies = []
        for ci, mf in enumerate(movieFilenames):
            fo = 0 if frame_offsets is None else frame_offsets[ci]
            movies.append(
                MovieReader.open_file(os.path.join(directory, mf),
                                      audio=False,
                                      frame_offset=fo))
        camera_ids = ['cam_%d' % ci for ci in xrange(len(mats))]
        print len(mats), len(movies), len(camera_ids)
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives[0].colour = (0, 1, 1, 0.5)  # back-projected "cyan" points
    primitives[1].colour = (0, 0, 1, 0.5)
    primitives[1].pointSize = 5
    primitives[2].colour = (1, 0, 0, 0.99)

    if len(movieFilenames) != 1 and detections_filename is not None:
        try:
            dot_detections = IO.load(detections_filename)[1]
        except:
            numFrames = len(c3d_frames)  # TODO HACK HACK
            dot_detections = movies_to_detections(movies, range(numFrames),
                                                  deinterlacing, attrs)
            IO.save(detections_filename, dot_detections)

        if detectingTiara:
            x3ds_seq = {}
            for fi in dot_detections.keys():
                frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                which = np.array(np.where(frame[:, 3] == 0)[0], dtype=np.int32)
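                # x3ds_seq maps frame index -> (3D points, labels): the VICON tiara reference
                # points (offset by [150, -100, 0]) followed by the selected C3D points of that frame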
                x3ds_seq[fi] = (
                    np.concatenate((VICON_tiara_x3ds + np.array([150, -100, 0], dtype=np.float32),
                                    frame[which, :3])),
                    np.concatenate((np.arange(len(VICON_tiara_x3ds), dtype=np.int32),
                                    which + len(VICON_tiara_x3ds))))

            dot_labels = get_labels(dot_detections.keys(),
                                    x3ds_seq,
                                    dot_detections,
                                    mats,
                                    x2d_threshold=0.05)

            calibration_fi = 546 - 2 - 6

            RT = tighten_calibration(x3ds_seq[calibration_fi],
                                     dot_labels[calibration_fi], mats)
            for v in c3d_frames:
                v[:, :3] = np.dot(v[:, :3], RT[:3, :3].T) + RT[:, 3]

            if True:
                dot_detections = IO.load(detections_filename)[1]
                x3ds_seq = {}
                for fi in dot_detections.keys():
                    frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                    which = np.array(np.where(frame[:, 3] == 0)[0],
                                     dtype=np.int32)
                    x3ds_seq[fi] = (
                        np.concatenate((VICON_tiara_x3ds + np.array([0, 1000, 0], dtype=np.float32),
                                        frame[which, :3])),
                        np.concatenate((np.arange(len(VICON_tiara_x3ds), dtype=np.int32),
                                        which + len(VICON_tiara_x3ds))))

                #dot_labels = get_labels(dot_detections.keys(), x3ds_seq, dot_detections, mats, x2d_threshold = 0.05)

    if detectingTiara:
        primitives.append(GLPoints3D(VICON_tiara_x3ds + [0, 1000, 0]))
        primitives[-1].pointSize = 5

    global track3d, prev_frame, booting, trackGraph
    track3d = Label.Track3D(mats[:len(movies)],
                            x2d_threshold=0.03,
                            x3d_threshold=5.0,
                            min_rays=3,
                            boot_interval=2)  #tilt_threshold = 0.01, gruffalo
    trackGraph = Label.TrackGraph()
    prev_frame = 0
    booting = 1

    from UI import QApp
    from PySide import QtGui
    from GCore import State
    # Modified the options parameter for fields to be the range of acceptable values for the box
    # Previously would crash if small_blur got too low
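    # each field is (key, label, tooltip, type, default, {'min': ..., 'max': ...})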
    QApp.fields = {
        'image filter': [
            ('small_blur', 'Small blur radius',
             'This is part of the image filter which controls the size of smallest detected features.',
             'int', small_blur, {
                 "min": 0,
                 "max": None
             }),
            ('large_blur', 'Large blur radius',
             'This is part of the image filter which controls the size of largest detected features.',
             'int', large_blur, {
                 "min": 0,
                 "max": None
             }),
            ('threshold_bright', 'threshold_bright',
             'Intensity threshold for detecting bright dots.', 'int',
             threshold_bright, {
                 "min": 0,
                 "max": 255
             }),
            ('threshold_dark_inv', 'threshold_dark_inv',
             'Intensity threshold for detecting dark dots (inverted image).',
             'int', threshold_dark_inv, {
                 "min": 0,
                 "max": 255
             }),
            ('circularity_threshold', 'circularity_threshold',
             'How circular a detection must be to count as a dot.', 'float',
             circularity_threshold, {
                 "min": 0,
                 "max": 100
             }),
            ('min_dot_size', 'min_dot_size',
             'Size of the smallest detected features.', 'float', min_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
            ('max_dot_size', 'max_dot_size',
             'Size of the largest detected features.', 'float', max_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
        ]
    }
    State.addKey('dotParams', {'type': 'image filter', 'attrs': attrs})
    State.setSel('dotParams')
    appIn = QtGui.QApplication(sys.argv)
    appIn.setStyle('plastique')
    win = QApp.QApp()
    win.setWindowTitle('Imaginarium Dots Viewer')
    QGLViewer.makeViewer(primitives=primitives,
                         primitives2D=primitives2D,
                         timeRange=(firstFrame, lastFrame),
                         callback=setFrame,
                         mats=mats,
                         camera_ids=camera_ids,
                         movies=movies,
                         pickCallback=picked,
                         appIn=appIn,
                         win=win)
Example #3
    wavFilename = os.path.join(ted_dir, '32T01.WAV')
    md = MovieReader.open_file(wavFilename)

    c3d_filename = os.path.join(
        ted_dir, '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
    c3d_dict = C3D.read(c3d_filename)
    c3d_frames = c3d_dict['frames']
    c3d_fps, c3d_labels = c3d_dict['fps'], c3d_dict['labels']
    if False:  # only for cleaned-up data
        c3d_subject = 'TedFace'
        which = np.where([s.startswith(c3d_subject) for s in c3d_labels])[0]
        c3d_frames = c3d_frames[:, which, :]
        c3d_labels = [c3d_labels[i] for i in which]
        print c3d_labels
    if False:  # this is for the cleaned-up data (don't apply the other offset...)
        offset = Calibrate.composeRT(Calibrate.composeR((0.0, 0.0, 0)),
                                     (0, 0, -8), 0)  # 0.902
        c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                      offset[:3, :3])[:, :, :3]
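    # apply the rig offset: each marker p is mapped to (p - T) . R, i.e. into the
    # coordinate frame defined by the offset transform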
    offset = Calibrate.composeRT(Calibrate.composeR((3.9, -38.7, 0)),
                                 (-159.6, 188.8, 123 - 12), 0)  # 0.902
    c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                  offset[:3, :3])[:, :, :3]

    geos = []
    dat_directory = os.path.join(os.environ['GRIP_DATA'], 'dat')

    if False:  # experiments involving deformation transfer
        geos_filename = 'geos'
        if not os.path.exists(geos_filename):
            ted_dir = os.environ['GRIP_DATA']
            ted_obj = readFlatObjFlipMouth(os.path.join(ted_dir, 'ted.obj'))
Example #4
def main():
	from UI import QGLViewer
	from UI import GLMeshes, GLPoints3D

	global g_setting_frame
	g_setting_frame = False
	# static data
	global g_webcam, g_md, g_rbfn, g_predictor, g_head_pan_shape, g_head_tilt_shape
	# runtime options and state
	global g_prev_smooth_shape, g_prev_vs, g_hmc_boot, g_neutral_corrective_shape, g_settle, g_head_pan_tilt_roll, g_smooth_pose
	global g_directory, g_TIS_server, g_mode, g_frame

	g_TIS_server = SocketServer.SocketServer()
	g_mode, g_frame = 0,{}

	grip_dir = os.environ['GRIP_DATA']
	g_directory = grip_dir

	g_webcam,g_md = None,None

	g_prev_vs, g_prev_smooth_shape = None,None
	g_hmc_boot = None
	#clear_neutral()
	g_neutral_corrective_shape = IO.load(os.path.join(g_directory,'neutral.out'))[1]
	g_settle = -1
	g_head_pan_tilt_roll = None
	g_smooth_pose = {}

	aam = IO.load(os.path.join(g_directory,'aam.out'))[1]
	if 0:
		svt = np.float32(aam['shapes']).reshape(-1,140)
		svt = np.dot(aam['shapes_u'],aam['shapes_s'].reshape(-1,1)*aam['shapes_vt'])
		svt = aam['shapes_s'].reshape(-1,1)*aam['shapes_vt']
		tmp = svt.reshape(svt.shape[0],-1,2)
		Sx,Sy = tmp[:,:,0],tmp[:,:,1]
		tmp = np.dot(np.dot(Sy.T,np.dot(Sx,Sx.T)),Sy)
		u,s,vt = np.linalg.svd(tmp, full_matrices=False)
		print s
		g_head_pan_shape = np.zeros((svt.shape[1]/2,2),dtype=np.float32)
		g_head_tilt_shape = np.zeros((svt.shape[1]/2,2),dtype=np.float32)
		g_head_pan_shape[:,0] = g_head_tilt_shape[:,1] = vt[0]
		print np.sum(g_head_pan_shape * aam['shapes_vt'][0].reshape(-1,2))
		print np.sum(g_head_tilt_shape * aam['shapes_vt'][1].reshape(-1,2))
	g_head_pan_shape = aam['shapes_vt'][0].reshape(-1,2)
	g_head_tilt_shape = aam['shapes_vt'][1].reshape(-1,2)
	g_head_tilt_shape = g_head_pan_shape[:,::-1]*np.float32([1,-1])
	print np.sum(g_head_pan_shape*g_head_tilt_shape)
	g_head_pan_shape *= np.linalg.norm(g_head_pan_shape)**-0.5
	g_head_tilt_shape *= np.linalg.norm(g_head_tilt_shape)**-0.5
	if np.sum(g_head_pan_shape[:,0] < 1): g_head_pan_shape = -g_head_pan_shape
	if np.sum(g_head_tilt_shape[:,1] > 1): g_head_tilt_shape = -g_head_tilt_shape
	#print np.sum(g_head_pan_shape * g_head_tilt_shape)
	#print np.dot(g_head_pan_shape[:,0],g_head_tilt_shape[:,1])

	g_predictor = Face.load_predictor(os.path.join(g_directory,'train.out'))
	rbfn_filename = os.path.join(g_directory,'rbfn.out')
	g_rbfn = IO.load(rbfn_filename)[1]
	#g_rbfn = convert_rbfn(rbfn_in_filename)
	#IO.save(rbfn_filename, g_rbfn)

	
	ref_shape = g_predictor['ref_shape']
	cx,cy = np.mean(ref_shape,axis=0)
	vx,vy = (np.var(ref_shape,axis=0)**0.5) * 2.5
	geo_bs = []
	ref_fs = Face.triangulate_2D(ref_shape)
	for p0,p1,p2 in ref_fs:
		geo_bs.append((p0,p1))
		geo_bs.append((p1,p2))
		geo_bs.append((p2,p0))
	geo_vs = np.zeros((len(ref_shape),3), dtype=np.float32)
	geo_fs = []
	geo_ts = np.float32([[1,0,0,0],[0,1,0,1000],[0,0,1,0]])
	geo_vts = np.zeros_like(ref_shape)
	
	img_vs = np.float32([[-1000,-1000,0],[1000,-1000,0],[1000,1000,0],[-1000,1000,0]])
	img_fs = np.int32([[0,1,2,3]])
	img_ts = np.float32([[1,0,0,0],[0,1,0,1000],[0,0,1,0]])
	img_vts = np.float32([[0,1],[1,1],[1,0],[0,0]])
	markup_mesh = GLPoints3D(vertices=geo_vs, edges=np.int32(geo_bs), names=[], colour=[0,1,0,1],edgeColour=[1,1,1,1])
	geo_mesh = GLMeshes(names=['geo_mesh'],verts=[geo_vs],faces=[geo_fs],transforms=[geo_ts],bones=[geo_bs], vts=[geo_vts], colour=[1,0,0,1])
	image_mesh = GLMeshes(names=['image_mesh'],verts=[img_vs],faces=[img_fs],transforms=[img_ts],vts=[img_vts])

	global g_bs_vs, g_bs_shape_mat, g_bs_fs, g_bs_vts, g_bs_shape_mat_T
	bs_dict = IO.load(os.path.join(g_directory,'harpy_ma.out'))[1]['blendShapes']['Harpy_cFace_GEOShape']
	obj_scale = 10.0
	g_bs_vs = np.float32(bs_dict['vs']*obj_scale)
	bs_dict['pts'] = [b*obj_scale for b in bs_dict['pts']]
	g_bs_fs = bs_dict['fs'] # warning: mix of quads and triangles :-(
	assert bs_dict['vts'].keys() == range(len(bs_dict['vts'].keys()))
	g_bs_vts = bs_dict['vts'].values()
	g_bs_ts = np.float32([[1,0,0,800],[0,1,0,-600],[0,0,1,300]])
	bs_mesh = GLMeshes(names=['bs_mesh'],verts=[g_bs_vs],faces=[g_bs_fs],transforms=[g_bs_ts],vts=[g_bs_vts],visible=False)

	rbfn_groups, rbfn_slider_splits, rbfn_slider_names, rbfn_marker_names = extract_groups(g_rbfn)
	slider_names = [(x[8:-2]+'.translateY' if x.startswith('get_ty') else x) for x in bs_dict['wt_names']]
	try:
		slider_order = [slider_names.index(x) for x in rbfn_slider_names]
	except Exception as e:
		print 'error',e
		slider_order = []
	g_bs_shape_mat = bs_dict['matrix'] = np.zeros((len(bs_dict['pts']), len(bs_dict['vs']), 3),dtype=np.float32)
	for m,ct,pt in zip(g_bs_shape_mat,bs_dict['cts'],bs_dict['pts']): m[ct] = pt
	g_bs_shape_mat = g_bs_shape_mat[slider_order]
	g_bs_shape_mat_T = g_bs_shape_mat.transpose(1,2,0).copy()
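	# g_bs_shape_mat_T has shape (num_verts, 3, num_sliders); the deformed mesh is
	# evaluated as g_bs_vs + np.dot(g_bs_shape_mat_T, slider_weights) in setFrame_cb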

	layers = {'image_mesh':image_mesh,'geo_mesh':geo_mesh,'bs_mesh':bs_mesh,'markup_mesh':markup_mesh}
	app,win = QGLViewer.makeApp()
	outliner = win.qoutliner
	#for gi,geo in enumerate(layers.keys()): outliner.addItem(geo, data='_OBJ_'+geo, index=gi)

	State.setKey('ui',{'type':'ui','attrs':{\
		'harpy_xoffset':300.0,'show_harpy':True,'rotate':0,'mirroring':False,'unreal':True,'streaming_TIS':False,\
		'using_webcam':False,'HMC_mode':True,'booting':True,'filtering':True,'setting_neutral':True,'debugging':False, \
		'webcam_index':0,'cam_offset':700,'cam_fps':50,'cam_width':1280,'cam_height':720, 'movie_filename':''}})
	if True: # running on deployed face machine at 720p50
		State.setKey('/root/ui',{'type':'ui','attrs':{\
			'harpy_xoffset':300.0,'show_harpy':False,'rotate':1,'mirroring':False,'unreal':True,'streaming_TIS':False,\
			'using_webcam':True,'HMC_mode':True,'booting':True,'filtering':True,'setting_neutral':True,'debugging':False, \
			'webcam_index':0,'cam_offset':700,'cam_fps':50,'cam_width':1280,'cam_height':720, 'movie_filename':''}})
	win.setFields('ui',     [
		('show_harpy',      'show_harpy','Whether to display the Harpy','bool', False),
		('harpy_xoffset',   'xoffset', 'Pixels to offset Harpy to right', 'float', 300.0),
		('rotate',          'rotation','Rotate image 0=up,1=left,2=down,3=right,-1=any angle','int', 0),
		('mirroring',       'mirror',  'Show reversed',                 'bool', False),
		('unreal',          'unreal',  'Whether to connect to unreal',  'bool', True),
		#('streaming_TIS',   'streaming_TIS',  'Whether currently streaming',   'bool', False),
		('using_webcam',    'webcam',  'Whether using the webcam',      'bool', False),
		('HMC_mode',        'HMC_mode','Boot every frame',              'bool', True),
		('booting',         'boot',    'Boot at next chance',           'bool', True),
		('filtering',       'filter',  'Whether to filter noise',       'bool', True),
		('setting_neutral', 'neutral', 'Set neutral at next chance',    'bool', False),
		('debugging',       'debug',   'Show rbfn input for debugging', 'bool', False),
		('webcam_index',    'camindex', 'The index of the webcam',      'int',  0),
		('cam_offset',      'camoffset','The offset of the webcam',     'int',  700),
		('cam_fps',         'fps',      'The frame rate of the webcam', 'int',  50),
		('cam_width',       'width',    'The width of the webcam image', 'int',  1280),
		('cam_height',      'height',   'The height of the webcam image', 'int',  720),
		('movie_filename',  'movie',   'The filename of the movie', 'string',  ''),
		])
	slider_names = sorted(g_rbfn['slider_names'])
	win.setFields('sliders', [(sn,sn,'Slider %d'%si,'float',0.0) for si,sn in enumerate(slider_names)])
	State.setKey('/root/sliders', {'type':'sliders','attrs':{sn:0.0 for sn in slider_names}})
	outliner.set_root('/root')
	#outliner.addItem('sliders', data='sliders', index=1)
	win.outliner.raise_()
	#win.select('ui')
	QApp.app.dirtyCB = dirty_cb
	QApp.app.addMenuItem({'menu':'&File','item':'Import &movie','tip':'Import a movie file','cmd':import_movie})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrain rbfn','tip':'Train the rbfn','cmd':retrain_RBFN})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrain rbfn (no linear)','tip':'Train the rbfn with no linear part','cmd':retrain_RBFN_no_linear})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrack refresh rbfn','tip':'Refresh the rbfn','cmd':retrack_refresh_rbfn})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrack remap rbfn','tip':'Rebuild the rbfn','cmd':retrack_remap_rbfn})
	QApp.app.addMenuItem({'menu':'&File','item':'Export rbfn','tip':'Export the rbfn','cmd':export_rbfn})
	State.clearUndoStack()
	QGLViewer.makeViewer(appName='StreamVideoTrack', timeRange=(0,100), callback=setFrame_cb,
		keyCallback=keypress_cb, layers=layers,
		mats=[Calibrate.makeMat(Calibrate.composeRT(np.eye(3)*[10,10,1],[0,1000,6000],1000),[0,0],[1920,1080])],
		camera_ids=['RBFN'])

	# Ensure the server has stopped when program terminates
	g_TIS_server.Stop()
Example #5
def setFrame_cb(fi):
	attrs = State.getKey('/root/ui/attrs/')
	global g_setting_frame
	if g_setting_frame: return
	g_setting_frame = True
	try: # within this block we touch the timeline, which could trigger calling this function recursively
		global g_mode, g_frame, g_TIS_server, g_neutral_corrective_shape
		global g_smooth_pose
		view = QApp.view()
		cid = view.cameraIndex()
		if cid != g_mode: # deal with changing modes
			g_mode = cid
			if g_mode == 0:
				if g_md is not None: QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
			elif g_mode == 1:
				pose_splits = rbfn_pose_splits()
				QApp.app.qtimeline.setRange(0, pose_splits[-1]-1)
			new_frame = g_frame.get(g_mode,fi)
			if new_frame != fi:
				QApp.app.qtimeline.frame = new_frame
				fi = new_frame
	except Exception as e:
		print 'exc setFrame',e
	g_setting_frame = False
	g_frame[g_mode] = fi
	
	if not attrs['setting_neutral']: g_neutral_corrective_shape = 0
	
	new_pose,new_shape,norm_shape,img,slider_names,slider_values,A = [track_view_cb,rbfn_view_cb][g_mode](fi,attrs)

	
	mirror_scale = -1 if attrs['mirroring'] else 1
	h,wm = img.shape[0]*0.5,img.shape[1]*0.5*mirror_scale

	geo_vs = np.zeros((new_shape.shape[0],3), dtype=np.float32)	
	if attrs['debugging']: # display the stabilised data
		geo_vs[:,:2] = norm_shape
		geo_vs *= 200
		geo_vs[:,:2] += np.int32(np.mean(new_shape, axis=0)/200)*200
	else: # display the tracking data
		geo_vs[:,:2] = new_shape

	geo_mesh,image_mesh,bs_mesh = QApp.app.getLayers(['geo_mesh', 'image_mesh', 'bs_mesh'])
	
	bs_mesh.visible = attrs['show_harpy']
	if bs_mesh.visible:
		global g_bs_vs, g_bs_shape_mat_T
		bs_mesh.setVs(g_bs_vs + np.dot(g_bs_shape_mat_T, np.clip(slider_values[:-3],0,1)))
		# compute the Harpy position
		R = Calibrate.composeR(new_pose*[1,-1,-1])
		if g_mode == 1: R = np.eye(3) # TODO
		bs_ts = Calibrate.composeRT(R,[0,1720,0],0) # compensate for the offset of the Harpy (temples ~1720mm above origin)
		scale = 1.0/np.linalg.norm(160.*A) # IPD (64mm) / 0.4 (ref_shape) = 160.
		off = np.mean(new_shape[[0,16]],axis=0) # get the position of the temples (pixels)
		g_smooth_pose[g_mode] = filter_data(np.float32([scale,off[0],off[1]]), g_smooth_pose.setdefault(g_mode,None), 10.0)
		pose = g_smooth_pose[g_mode]
		bs_ts[:3] *= pose[0]
		bs_ts[:3,3] += [pose[1]-abs(wm),1000+pose[2]-h,0]
		# offset screen-right 300mm
		bs_ts[:3,3] += (pose[0]*attrs['harpy_xoffset'])*np.float32([np.cos(np.radians(view.camera.cameraRoll)),-np.sin(np.radians(view.camera.cameraRoll)),0.0])
		bs_mesh.transforms[0] = bs_ts.T
	
	geo_mesh.setVs(geo_vs)
	geo_mesh.colour=[0 if attrs['streaming_TIS'] else 1,1 if attrs['streaming_TIS'] else 0,0,1]
	geo_mesh.transforms[0][:,:3] = [[mirror_scale,0,0],[0,1,0],[0,0,1],[-wm,1000-h,0.1]]
	image_mesh.setVs(np.float32([[-wm,-h,0],[wm,-h,0],[wm,h,0],[-wm,h,0]]))
	image_mesh.setImage(img)
	if attrs['unreal']:
		if not attrs['streaming_TIS']: toggle_unreal()
		ret, activeConnections = g_TIS_server.WriteAll(PyTISStream.getBlendshapeData(slider_names, slider_values))
		if not ret:
			print "Server is not Initialised"
			State._setKey('/root/ui/attrs/streaming_TIS', False)
	else:
		# Turn off streaming
		if attrs['streaming_TIS']: toggle_unreal()
	QApp.app.updateGL()
Example #6
        wavFilename = os.path.join(ted_dir, '32T01.WAV')
        md = MovieReader.open_file(wavFilename)
        c3d_filename = os.path.join(
            ted_dir, '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames = c3d_dict['frames']
        c3d_fps, c3d_labels = c3d_dict['fps'], c3d_dict['labels']
        if False:  # only for cleaned-up data
            c3d_subject = 'TedFace'
            which = np.where([s.startswith(c3d_subject)
                              for s in c3d_labels])[0]
            c3d_frames = c3d_frames[:, which, :]
            c3d_labels = [c3d_labels[i] for i in which]
            print c3d_labels
        if False:  # this is for the cleaned-up data (don't apply the other offset...)
            offset = Calibrate.composeRT(Calibrate.composeR((0.0, 0.0, 0)),
                                         (0, 0, -8), 0)  # 0.902
            c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                          offset[:3, :3])[:, :, :3]
        offset = Calibrate.composeRT(Calibrate.composeR((3.9, -38.7, 0)),
                                     (-159.6, 188.8, 123 - 12), 0)  # 0.902
        c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                      offset[:3, :3])[:, :, :3]

        geos = []
        dat_directory = os.path.join(os.environ['GRIP_DATA'], 'dat')

        if False:  # experiments involving deformation transfer
            geos_filename = 'geos'
            if not os.path.exists(geos_filename):
                ted_dir = os.path.join(os.environ['GRIP_DATA'], 'ted')
                ted_obj = OBJReader.readFlatObjFlipMouth(