Example #1
0
	def R(self, upright=False):
		'''Returns the orientation matrix for the view, R, composed from the camera pan, tilt and roll.'''
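		# When the view is locked upright and an upright matrix is requested, the camera roll is ignored.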
		roll = 0 if (self.lockedUpright and upright) else self.cameraRoll
		return Calibrate.composeR([self.cameraPan, self.cameraTilt, roll])
Example #2
0
    def generate_skeleton_lods(skelDict, Gs=None):
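        '''Builds a low-detail visibility mesh (one scaled template primitive per joint) and stores it in skelDict['visibilityLod'].'''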
        # TODO: First iteration: Improve code and optimise
        # TODO: Contains hard coded values (generalise.. actually probably better to use a callback.. lodgenerator visitor)
        from GCore import Calibrate
        import numpy as np
        vs, tris, orientation, names = [], [], [], []
        if 'jointWidth' not in skelDict: return
        jointWidth = skelDict['jointWidth']
        jointHeightMultiplier = 1.3
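        # NOTE: jointHeightMultiplier appears to be unused in this function.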

        if Gs is None: Gs = skelDict['Gs']
        Bs = skelDict['Bs']

        lodVerts = skelDict['verts']
        lodTris = skelDict['tris']

        for jointIdx, jointName in enumerate(skelDict['jointNames']):
            if 'Free' in jointName: continue

            jointGs = Gs[jointIdx]
            jointBs = Bs[jointIdx]
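            # Axes where the bone vector is zero form the cross-section and take the per-joint width below.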
            whichAxis = np.where(jointBs == 0)[0]
            R, T, _ = Calibrate.decomposeRT(jointGs, 1, False)
            jointMeshScale = jointBs.copy()
            if jointName == 'root': jointMeshScale = jointMeshScale * 1.4
            elif 'Spine' in jointName: jointMeshScale = jointMeshScale * 1.2

            jointMeshScale[whichAxis] = jointWidth[jointName]

            if jointName == 'VSS_Chest':
                jointMeshScale[0] = jointWidth[jointName][0]
                jointMeshScale[1] = 120.
                jointMeshScale[2] = jointWidth[jointName][1]

            axisToggle = np.array([1, 1, 1], dtype=np.float32)
            axisToggle[whichAxis] = 0.0
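            # Offset the primitive by half its length along the bone axis only; the width axes get no offset.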
            translations = jointMeshScale / 2
            if jointName == 'VSS_Chest': translations[0:1] = 0
            offset = translations * axisToggle

            boneVerts = lodVerts.copy()
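            # Scale the template verts to the bone, shift them by the offset, then transform into world space.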
            for vi, v in enumerate(boneVerts):
                v = v * jointMeshScale
                if jointName in ['root']:
                    v = v - offset
                    v = np.dot(
                        Calibrate.composeR(
                            np.array([0, 0, 90], dtype=np.float32)), v.T)
                else:
                    v = v + offset

                v = np.dot(jointGs, np.hstack((v, 1)).T)
                boneVerts[vi] = v[:3]

            # Offset the triangle indices by the verts already emitted (assumes an 8-vert template per bone).
            tris.append(lodTris + len(vs) * 8)
            vs.append(boneVerts)

            # Orientation flag: 0 when the bone extends along a negative axis, 1 otherwise.
            boneLength = jointBs[np.where(jointBs != 0)[0]]
            orientation.append(0 if boneLength.any() and boneLength[0] < 0 else 1)
            names.append(jointName)

        v = np.concatenate(vs)
        t = np.concatenate(tris).tolist()
        lodAttrs = {
            'triangles': v[t],
            'verts': v,
            'tris': t,
            'faces': tris,
            'names': names
        }
        skelDict['visibilityLod'] = lodAttrs
        return v, t, vs, tris, orientation, names
Example #3
0
def main():
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands
    import IO
    import sys, os
    deinterlacing = False
    detectingWands = False
    detectingTiara = False
    dot_detections = None
    detections_filename = None
    frame_offsets = None
    firstFrame, lastFrame = 0, 5000
    drawDotSize = 4.0
    fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 50., (0, 0), (0, 0, 0), (0, 1250, 0), (0, 0)
    mats = []
    grip_directory = os.environ['GRIP_DATA']

    # Hard-coded capture configurations; exactly one of the branches below is enabled.
    if 0:
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 37.9, (0, 0), (
            -66.0, 3.5, -0.2), (4850, 1330, 3280), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat0 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 55.8, (0, 0), (
            -103.6, 3.5, -0.3), (2980, 1380, -2180), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat1 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 49.3, (0, 0), (
            27.9, 4.0, -0.2), (-5340, 1150, 5030), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat2 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 50.6, (0, 0), (
            -156.6, 4.9, 0.2), (-105, 1400, -4430), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat3 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        mats = [mat0, mat1, mat2, mat3]
        xcp_filename = '154535_Cal168_Floor_Final.xcp'
        directory = os.path.join(grip_directory, 'REFRAME')
        movieFilenames = [
            '001E0827_01.MP4', '001F0813_01.MP4', '001G0922_01.MP4',
            '001H0191_01.MP4'
        ]
        #mats,movieFilenames = mats[:1],movieFilenames[:1] # restrict to single-view
        frame_offsets = [119 + 160, 260, 339, 161]
        small_blur, large_blur = 1, 25
        min_dot_size = 1.0
        max_dot_size = 20.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 250, 250  #135,135
    elif 0:
        xcp_filename = '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_01.xcp'
        detections_filename = 'detections.dat'
        detectingTiara = True
        pan_tilt_roll = (0, 0, 90)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], 'ted')
        movieFilenames = [
            '201401211653-4Pico-32_Quad_Dialogue_01_%d.mpg' % xi
            for xi in range(1)
        ]
        firstFrame = 511
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 0, 170
    elif 1:
        xcp_filename = '50_Grip_RoomCont_AA_02.xcp'
        detections_filename = 'detections.dat'
        pan_tilt_roll = (0, 0, 0)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], '151110')
        movieFilenames = ['50_Grip_RoomCont_AA_02.v2.mov']
        firstFrame = 0
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 170, 170

    # Gather the dot-detection tuning parameters chosen above into a dict keyed by name.
    attrs = dict([(v, eval(v)) for v in [
        'small_blur', 'large_blur', 'threshold_bright', 'threshold_dark_inv',
        'circularity_threshold', 'min_dot_size', 'max_dot_size'
    ]])

    primitives2D = QGLViewer.makePrimitives2D(([], []), ([], []))
    primitives = []
    if len(movieFilenames) == 1:
        # TODO: time_base, timecode
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mats = [[
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]]
        camera_ids = ['video']
        movies = [
            MovieReader.open_file(os.path.join(directory, movieFilenames[0]),
                                  audio=False)
        ]
    else:  # hard coded cameras
        if xcp_filename.endswith('.xcp'):
            if detectingTiara:  # gruffalo
                c3d_filename = os.path.join(
                    directory,
                    '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
                from IO import C3D
                c3d_dict = C3D.read(c3d_filename)
                global c3d_frames
                c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict[
                    'fps'], c3d_dict['labels']
                c3d_subject = ''  #'TedFace'
                which = np.where(
                    [s.startswith(c3d_subject) for s in c3d_labels])[0]
                c3d_frames = c3d_frames[:, which, :]
                c3d_labels = [c3d_labels[i] for i in which]
                print len(c3d_frames)
            xcp, xcp_data = ViconReader.loadXCP(
                os.path.join(directory, xcp_filename))
            mats.extend(xcp)
        elif xcp_filename.endswith('.cal'):
            from IO import OptitrackReader
            xcp, xcp_data = OptitrackReader.load_CAL(
                os.path.join(directory, xcp_filename))
            mats = xcp
            print 'mats', len(mats), len(movieFilenames)
            assert (len(mats) == len(movieFilenames))
        camera_ids = []
        movies = []
        for ci, mf in enumerate(movieFilenames):
            fo = 0 if frame_offsets is None else frame_offsets[ci]
            movies.append(
                MovieReader.open_file(os.path.join(directory, mf),
                                      audio=False,
                                      frame_offset=fo))
        camera_ids = ['cam_%d' % ci for ci in xrange(len(mats))]
        print len(mats), len(movies), len(camera_ids)
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives[0].colour = (0, 1, 1, 0.5)  # back-projected "cyan" points
    primitives[1].colour = (0, 0, 1, 0.5)
    primitives[1].pointSize = 5
    primitives[2].colour = (1, 0, 0, 0.99)

    if len(movieFilenames) != 1 and detections_filename is not None:
        try:
            dot_detections = IO.load(detections_filename)[1]
        except:
            numFrames = len(c3d_frames)  # TODO HACK HACK
            dot_detections = movies_to_detections(movies, range(numFrames),
                                                  deinterlacing, attrs)
            IO.save(detections_filename, dot_detections)

        if detectingTiara:
            x3ds_seq = {}
            for fi in dot_detections.keys():
                frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                which = np.array(np.where(frame[:, 3] == 0)[0], dtype=np.int32)
                x3ds_seq[fi] = np.concatenate((VICON_tiara_x3ds + np.array([150,-100,0],dtype=np.float32),frame[which,:3])), \
                      np.concatenate((np.arange(len(VICON_tiara_x3ds),dtype=np.int32),which+len(VICON_tiara_x3ds)))

            dot_labels = get_labels(dot_detections.keys(),
                                    x3ds_seq,
                                    dot_detections,
                                    mats,
                                    x2d_threshold=0.05)

            calibration_fi = 546 - 2 - 6

            RT = tighten_calibration(x3ds_seq[calibration_fi],
                                     dot_labels[calibration_fi], mats)
            for v in c3d_frames:
                v[:, :3] = np.dot(v[:, :3], RT[:3, :3].T) + RT[:, 3]

            if True:
                dot_detections = IO.load(detections_filename)[1]
                x3ds_seq = {}
                for fi in dot_detections.keys():
                    frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                    which = np.array(np.where(frame[:, 3] == 0)[0],
                                     dtype=np.int32)
                    x3ds_seq[fi] = np.concatenate((VICON_tiara_x3ds + np.array([0,1000,0],dtype=np.float32),frame[which,:3])), \
                          np.concatenate((np.arange(len(VICON_tiara_x3ds),dtype=np.int32),which+len(VICON_tiara_x3ds)))

                #dot_labels = get_labels(dot_detections.keys(), x3ds_seq, dot_detections, mats, x2d_threshold = 0.05)

    if detectingTiara:
        primitives.append(GLPoints3D(VICON_tiara_x3ds + [0, 1000, 0]))
        primitives[-1].pointSize = 5

    global track3d, prev_frame, booting, trackGraph
    track3d = Label.Track3D(mats[:len(movies)],
                            x2d_threshold=0.03,
                            x3d_threshold=5.0,
                            min_rays=3,
                            boot_interval=2)  #tilt_threshold = 0.01, gruffalo
    trackGraph = Label.TrackGraph()
    prev_frame = 0
    booting = 1

    from UI import QApp
    from PySide import QtGui
    from GCore import State
    # Modified the options parameter for fields to be the range of acceptable values for the box
    # Previously would crash if small_blur got too low
    QApp.fields = {
        'image filter': [
            ('small_blur', 'Small blur radius',
             'This is part of the image filter which controls the size of smallest detected features.',
             'int', small_blur, {
                 "min": 0,
                 "max": None
             }),
            ('large_blur', 'Large blur radius',
             'This is part of the image filter which controls the size of largest detected features.',
             'int', large_blur, {
                 "min": 0,
                 "max": None
             }),
            ('threshold_bright', 'threshold_bright',
             'Brightness threshold used by the image filter when detecting bright dots.',
             'int', threshold_bright, {
                 "min": 0,
                 "max": 255
             }),
            ('threshold_dark_inv', 'threshold_dark_inv',
             'Inverted darkness threshold used by the image filter when detecting dark dots.',
             'int', threshold_dark_inv, {
                 "min": 0,
                 "max": 255
             }),
            ('circularity_threshold', 'circularity_threshold',
             'Threshold on how circular a detected dot must be.', 'float', circularity_threshold, {
                 "min": 0,
                 "max": 100
             }),
            ('min_dot_size', 'min_dot_size',
             'Smallest feature size that will be detected as a dot.', 'float', min_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
            ('max_dot_size', 'max_dot_size',
             'Largest feature size that will be detected as a dot.', 'float', max_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
        ]
    }
    State.addKey('dotParams', {'type': 'image filter', 'attrs': attrs})
    State.setSel('dotParams')
    appIn = QtGui.QApplication(sys.argv)
    appIn.setStyle('plastique')
    win = QApp.QApp()
    win.setWindowTitle('Imaginarium Dots Viewer')
    QGLViewer.makeViewer(primitives=primitives,
                         primitives2D=primitives2D,
                         timeRange=(firstFrame, lastFrame),
                         callback=setFrame,
                         mats=mats,
                         camera_ids=camera_ids,
                         movies=movies,
                         pickCallback=picked,
                         appIn=appIn,
                         win=win)
Example #4
0
    wavFilename = os.path.join(ted_dir, '32T01.WAV')
    md = MovieReader.open_file(wavFilename)

    c3d_filename = os.path.join(
        ted_dir, '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
    c3d_dict = C3D.read(c3d_filename)
    c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict[
        'fps'], c3d_dict['labels']
    if False:  # only for cleaned-up data
        c3d_subject = 'TedFace'
        which = np.where([s.startswith(c3d_subject) for s in c3d_labels])[0]
        c3d_frames = c3d_frames[:, which, :]
        c3d_labels = [c3d_labels[i] for i in which]
        print c3d_labels
    if False:  # this is for the cleaned-up data (don't apply the other offset...)
        offset = Calibrate.composeRT(Calibrate.composeR((0.0, 0.0, 0)),
                                     (0, 0, -8), 0)  # 0.902
        c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                      offset[:3, :3])[:, :, :3]
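    # Rigid offset (rotation then translation) applied to the C3D frames to align them with the scene.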
    offset = Calibrate.composeRT(Calibrate.composeR((3.9, -38.7, 0)),
                                 (-159.6, 188.8, 123 - 12), 0)  # 0.902
    c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                  offset[:3, :3])[:, :, :3]

    geos = []
    dat_directory = os.path.join(os.environ['GRIP_DATA'], 'dat')

    if False:  # experiments involving deformation transfer
        geos_filename = 'geos'
        if not os.path.exists(geos_filename):
            ted_dir = os.environ['GRIP_DATA']
Example #5
0
def setFrame_cb(fi):
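	'''Timeline callback for frame fi: updates the tracking/RBFN view, the Harpy blendshape mesh and optional TIS streaming.'''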
	attrs = State.getKey('/root/ui/attrs/')
	global g_setting_frame
	if g_setting_frame: return
	g_setting_frame = True
	try: # within this loop we handle the timeline, which could trigger calling this function recursively
		global g_mode, g_frame, g_TIS_server, g_neutral_corrective_shape
		global g_smooth_pose
		view = QApp.view()
		cid = view.cameraIndex()
		if cid != g_mode: # deal with changing modes
			g_mode = cid
			if g_mode == 0:
				if g_md is not None: QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
			elif g_mode == 1:
				pose_splits = rbfn_pose_splits()
				QApp.app.qtimeline.setRange(0, pose_splits[-1]-1)
			new_frame = g_frame.get(g_mode,fi)
			if new_frame != fi:
				QApp.app.qtimeline.frame = new_frame
				fi = new_frame
	except Exception as e:
		print 'exc setFrame',e
	g_setting_frame = False
	g_frame[g_mode] = fi
	
	if not attrs['setting_neutral']: g_neutral_corrective_shape = 0
	
	new_pose,new_shape,norm_shape,img,slider_names,slider_values,A = [track_view_cb,rbfn_view_cb][g_mode](fi,attrs)

	# Half image height and width in pixels; the width is negated when mirroring so x coordinates flip.
	mirror_scale = -1 if attrs['mirroring'] else 1
	h,wm = img.shape[0]*0.5,img.shape[1]*0.5*mirror_scale

	geo_vs = np.zeros((new_shape.shape[0],3), dtype=np.float32)	
	if attrs['debugging']: # display the stabilised data
		geo_vs[:,:2] = norm_shape
		geo_vs *= 200
		geo_vs[:,:2] += np.int32(np.mean(new_shape, axis=0)/200)*200
	else: # display the tracking data
		geo_vs[:,:2] = new_shape

	geo_mesh,image_mesh,bs_mesh = QApp.app.getLayers(['geo_mesh', 'image_mesh', 'bs_mesh'])
	
	bs_mesh.visible = attrs['show_harpy']
	if bs_mesh.visible:
		global g_bs_vs, g_bs_shape_mat_T
		bs_mesh.setVs(g_bs_vs + np.dot(g_bs_shape_mat_T, np.clip(slider_values[:-3],0,1)))
		# compute the Harpy position
		R = Calibrate.composeR(new_pose*[1,-1,-1])
		if g_mode == 1: R = np.eye(3) # TODO
		bs_ts = Calibrate.composeRT(R,[0,1720,0],0) # compensate for the offset of the Harpy (temples ~1720mm above origin)
		scale = 1.0/np.linalg.norm(160.*A) # IPD (64mm) / 0.4 (ref_shape) = 160.
		off = np.mean(new_shape[[0,16]],axis=0) # get the position of the temples (pixels)
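		# Temporally smooth the scale and screen-space offset used to place the Harpy.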
		g_smooth_pose[g_mode] = filter_data(np.float32([scale,off[0],off[1]]), g_smooth_pose.setdefault(g_mode,None), 10.0)
		pose = g_smooth_pose[g_mode]
		bs_ts[:3] *= pose[0]
		bs_ts[:3,3] += [pose[1]-abs(wm),1000+pose[2]-h,0]
		# offset screen-right 300mm
		bs_ts[:3,3] += (pose[0]*attrs['harpy_xoffset'])*np.float32([np.cos(np.radians(view.camera.cameraRoll)),-np.sin(np.radians(view.camera.cameraRoll)),0.0])
		bs_mesh.transforms[0] = bs_ts.T
	
	geo_mesh.setVs(geo_vs)
	geo_mesh.colour = [0, 1, 0, 1] if attrs['streaming_TIS'] else [1, 0, 0, 1] # green while streaming to TIS, red otherwise
	geo_mesh.transforms[0][:,:3] = [[mirror_scale,0,0],[0,1,0],[0,0,1],[-wm,1000-h,0.1]]
	image_mesh.setVs(np.float32([[-wm,-h,0],[wm,-h,0],[wm,h,0],[-wm,h,0]]))
	image_mesh.setImage(img)
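	# When Unreal streaming is enabled, push the blendshape sliders to the TIS server; otherwise ensure streaming is off.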
	if attrs['unreal']:
		if not attrs['streaming_TIS']: toggle_unreal()
		ret, activeConnections = g_TIS_server.WriteAll(PyTISStream.getBlendshapeData(slider_names, slider_values))
		if not ret:
			print "Server is not Initialised"
			State._setKey('/root/ui/attrs/streaming_TIS', False)
	else:
		# Turn off streaming
		if attrs['streaming_TIS']: toggle_unreal()
	QApp.app.updateGL()
Example #6
0
        wavFilename = os.path.join(ted_dir, '32T01.WAV')
        md = MovieReader.open_file(wavFilename)
        c3d_filename = os.path.join(
            ted_dir, '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict[
            'fps'], c3d_dict['labels']
        if False:  # only for cleaned-up data
            c3d_subject = 'TedFace'
            which = np.where([s.startswith(c3d_subject)
                              for s in c3d_labels])[0]
            c3d_frames = c3d_frames[:, which, :]
            c3d_labels = [c3d_labels[i] for i in which]
            print c3d_labels
        if False:  # this is for the cleaned-up data (don't apply the other offset...)
            offset = Calibrate.composeRT(Calibrate.composeR((0.0, 0.0, 0)),
                                         (0, 0, -8), 0)  # 0.902
            c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                          offset[:3, :3])[:, :, :3]
        offset = Calibrate.composeRT(Calibrate.composeR((3.9, -38.7, 0)),
                                     (-159.6, 188.8, 123 - 12), 0)  # 0.902
        c3d_frames[:, :, :3] = np.dot(c3d_frames[:, :, :3] - offset[:3, 3],
                                      offset[:3, :3])[:, :, :3]

        geos = []
        dat_directory = os.path.join(os.environ['GRIP_DATA'], 'dat')

        if False:  # experiments involving deformation transfer
            geos_filename = 'geos'
            if not os.path.exists(geos_filename):
                ted_dir = os.path.join(os.environ['GRIP_DATA'], 'ted')