Example #1
def GiantViewer(raw_filename, cal_filename):
    '''Generate a 3D view of a Giant raw (2D) file, using the calibration.'''
    print 'loading calibration'
    camera_info = GiantReader.readCal(cal_filename)

    camera_ids = None  # no camera IDs are derived for Giant data; the viewer accepts None
    print "Camera IDs:\n{}".format(camera_ids)
    print 'loading 2d'
    raw_dict = GiantReader.readAsciiRaw(raw_filename)
    raw_frames = raw_dict['frames']

    print 'num frames', raw_dict['numFrames']

    mats = [
        Calibrate.makeMat(camera['MAT'], camera['DISTORTION'], (512, 440))
        for camera in camera_info['Cameras']
    ]
    track3d = Label.Track3D(mats)

    primitives = QGLViewer.makePrimitives(vertices=[], altVertices=[])
    primitives2D = QGLViewer.makePrimitives2D(([], [0]))
    cb = functools.partial(intersectRaysCB,
                           raw_frames=raw_frames,
                           mats=mats,
                           primitives=primitives,
                           primitives2D=primitives2D,
                           track3d=track3d)
    QGLViewer.makeViewer(primitives=primitives,
                         primitives2D=primitives2D,
                         timeRange=(1, max(raw_frames.keys()), 1, 100.0),
                         callback=cb,
                         mats=mats,
                         camera_ids=camera_ids)  # , callback=intersectRaysCB
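
A minimal command-line driver for the function above (a sketch; the script name and argument handling are assumptions, not part of the original module):

if __name__ == '__main__':
    import sys
    if len(sys.argv) != 3:
        print 'usage: giant_viewer.py <raw_file> <cal_file>'
        sys.exit(1)
    GiantViewer(sys.argv[1], sys.argv[2])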
Example #2
        def cook(self, location, interface, attrs):
            if not self.client.IsConnected():
                print "Not connected"
                if not self.client.Connect(attrs['ipAddress'],
                                           int(attrs['port'])):
                    print "Connection Failed"
                    return False
                print "New Connection!"

            currentFrame = self.client.GetCurrentFrame()

            RT = currentFrame.CameraTracking.Transform

            fovX, fovY = currentFrame.OpticalParameters.FOV

            if np.abs(fovY) < 1e-10:
                return False

            cx, cy = currentFrame.OpticalParameters.ProjectionCenter  # TODO: Investigate format
            # remap the normalised projection centre (0..1) to an offset around the image centre (-1..1)
            ox, oy = 2 * (cx - 0.5), 2 * (cy - 0.5)

            K = Calibrate.composeK(fovX,
                                   ox=ox,
                                   oy=oy,
                                   square=(fovY / fovX),
                                   skew=0)[:3, :3]
            width_height = currentFrame.OpticalParameters.Resolution
            P = np.dot(K, RT)
            mat = Calibrate.makeMat(P, (0.0, 0.0),
                                    width_height)  # TODO Distortion Param

            self.setupAttrs = {
                'Ps': [P],
                'camera_ids': ["1"],
                'camera_names': ["NCAM_Camera"],
                'mats': [mat],
                'updateMats': True
            }
            interface.createChild('cameras',
                                  'cameras',
                                  atLocation=location,
                                  attrs=self.setupAttrs)

            return True
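
The projection-centre remap above takes a normalised centre (cx, cy) in [0, 1] and turns it into an offset that is zero at the image centre, which appears to be the convention composeK expects. A quick sanity check of that arithmetic (values made up):

cx, cy = 0.5, 0.5                         # principal point at the image centre
assert (2 * (cx - 0.5), 2 * (cy - 0.5)) == (0.0, 0.0)
cx = 0.6                                  # 60% of the way across the image
assert abs(2 * (cx - 0.5) - 0.2) < 1e-12  # maps to an offset of +0.2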
Example #3
	def cook(self, location, interface, attrs):
		if not self.useFrame(interface.frame(), attrs['frameRange']):
			interface.setAttr('updateMats', False)
			return

		# We need 2D data e.g. wand detections from a wand op
		# We need 3D wand data from e.g. c3d or a 3D wand detector
		dets_location = attrs['detections']
		x3ds_location = attrs['x3ds']
		if not dets_location or not x3ds_location: return

		# Get the 2D and 3D data
		x2ds = interface.attr('rx2ds', atLocation=dets_location)
		x2d_splits = interface.attr('x2ds_splits', atLocation=dets_location)
		x3ds = interface.attr('x3ds', atLocation=x3ds_location)

		if x2ds is None or x2d_splits is None or x3ds is None: return

		numCameras = len(x2d_splits) - 1
		error_threshold = attrs['error_threshold']

		# Get the data we've collected already so we can add to it
		frame = interface.frame()
		dets_colours = interface.attr('x2ds_colours', atLocation=dets_location)
		collectedDets = interface.attr('collect_rx2ds')
		collectedX3ds = interface.attr('collect_x3ds')
		lastFrame = interface.attr('lastFrame', [frame] * numCameras)
		emptyFrame3d = np.array([[]], dtype=np.float32).reshape(-1, 3)

		# This is potentially used by other ops so we only set it when we have some confidence
		# (and we might reset or tweak the values to indicate confidence levels at some point)
		cameraErrors = interface.attr('cameraErrors', [-1] * numCameras)

		# This is never modified, so the raw camera rms values can be inspected regardless of what we make of them
		rmsValues = interface.attr('rms', [-1] * numCameras)

		# Get the width and height for the videos
		vwidth = interface.attr('vwidth', [1920] * numCameras)
		vheight = interface.attr('vheight', [1080] * numCameras)

		# Get the frame mapping for x3ds
		x3ds_frames = interface.attr('x3ds_frames', {})
		x2ds_frames = interface.attr('x2ds_frames', [[] for i in xrange(numCameras)])

		# Get the camera matrices. We initialise them with default settings if we don't find any
		mats = interface.attr('mats', atLocation=location)
		if mats is None:
			mats = []
			for ci in range(numCameras):
				mats.append(Calibrate.makeUninitialisedMat(ci, (vheight[ci], vwidth[ci])))

		# Allow overriding the error threshold using an attribute (on the cooked location)
		error_threshold_attr = interface.attr('error_threshold')
		if error_threshold_attr is not None:
			error_threshold = error_threshold_attr

		Ps = interface.attr('Ps')
		if Ps is None: Ps = [np.array([], dtype=np.float32) for n in range(numCameras)]

		# Get the minimum number of samples we need to start solving distortion etc. as specified by the user
		minSamples = attrs['min_samples']

		# Prepare the collected data for further processing (or initialise if nothing has been collected)
		if collectedDets is not None:
			c_x2ds, c_splits = collectedDets
			cams_collected = [c_x2ds[c0:c1] for c0, c1 in zip(c_splits[:-1], c_splits[1:])]
		else:
			cams_collected = [[] for _ in xrange(numCameras)]
			collectedX3ds = [emptyFrame3d for _ in xrange(numCameras)]

		# Process each camera: look for a wand and attempt a calibration. If we're happy
		# with the result we add it to our collection
		for ci, (c0, c1) in enumerate(zip(x2d_splits[:-1], x2d_splits[1:])):
			elapsed = frame - lastFrame[ci]
			if 0 < elapsed < attrs['jumpFrames']: continue

			# Get the 2Ds and 3Ds for the wand in this camera (if any)
			cameraDetections = x2ds[c0:c1]
			cameraX3ds = x3ds
			if not cameraDetections.any() or not cameraX3ds.any(): continue

			# Add the new detection to the existing collection as a candidate for a new collection
			if cams_collected[ci] is None or len(cams_collected[ci]) == 0:
				proposalDets, proposalX3ds = cameraDetections, cameraX3ds
			else:
				proposalDets = np.concatenate((cams_collected[ci], cameraDetections))
				proposalX3ds = np.concatenate((collectedX3ds[ci], cameraX3ds))

			# Check whether we have enough samples to start solving for distortion and focal
			# length, compared to the user-specified minimum number of samples
			numSamples = len(proposalDets) / 5  # the wand contributes 5 detections per frame
			# if numSamples == minSamples: self.logger.info('Camera %d reached min samples of %d' % (ci, minSamples))
			solveTrigger = numSamples > minSamples
			solve_focal_length = attrs['solve_focal_length'] if solveTrigger else False
			solve_distortion = attrs['solve_distortion'] if solveTrigger else False

			# The wand is assumed to have 5 points so we make sure we've got at least one wand before attempting
			# to calibrate
			if len(proposalDets) >= 5 and len(proposalX3ds) >= 5:
				P, ks, rms = Calibrate.cv2_solve_camera_from_3d(proposalX3ds, proposalDets,
																solve_focal_length=solve_focal_length,
																solve_distortion=solve_distortion)

				if ks[0] < -3. or ks[0] > 3.: ks[0] = 0.
				if ks[1] < -3. or ks[1] > 3.: ks[1] = 0.

				# This shouldn't happen, but if we lose confidence in the camera we can visualise it
				# by resetting the camera error (this will change the colour in the UI)
				if rms > error_threshold:
					cameraErrors[ci] = -1
					continue

				# See how the rms for the calibration compares to the last recorded value for this camera
				prevRms = rms if rmsValues[ci] == -1 else rmsValues[ci]
				rmsDelta = rms - prevRms

				# Keep this data only if the rms is no worse than the last recorded error
				# for this camera (always keep while still bootstrapping the solve)
				if rmsDelta > 0 and solveTrigger:
					continue
				cams_collected[ci] = proposalDets
				collectedX3ds[ci] = proposalX3ds
				if frame not in x3ds_frames:
					x3ds_frames[frame] = proposalX3ds[-5:]
				x2ds_frames[ci] += [frame] * 5

				# Record the rms value for the camera
				rmsValues[ci] = rms

				# Once we've solved for distortion etc. we are more confident in the accuracy of our
				# error, so we start reporting it; the value can be used for visualisation etc.
				if solveTrigger: cameraErrors[ci] = rms
				lastFrame[ci] = frame

				# Everything has gone well so far so we create and add the new camera matrix
				mat = Calibrate.makeMat(P, ks, (vheight[ci], vwidth[ci]))
				mats[ci] = mat
				Ps[ci] = P

		# Concatenate the results from all the cameras
		cams = [np.concatenate(cc) for cc in cams_collected if len(cc)]
		if not cams:
			# We haven't found a wand in any camera so we just keep calm and return
			return

		# Build our collections and write to the interface
		collectedDets = np.array(np.concatenate(cams), dtype=np.float32).reshape(-1, 2), \
						Interface.makeSplitBoundaries(map(len, cams_collected))
		interface.setAttr('collect_rx2ds', collectedDets)
		interface.setAttr('collect_x3ds', collectedX3ds)
		interface.setAttr('x2ds_frames', x2ds_frames)
		interface.setAttr('x3ds_frames', x3ds_frames)
		interface.setAttr('lastFrame', lastFrame)

		# Write the calibration data to the interface and request an update at render time
		interface.setAttr('mats', mats)
		interface.setAttr('Ps', Ps)
		interface.setAttr('rms', rmsValues)
		interface.setAttr('cameraErrors', cameraErrors)
		interface.setAttr('updateMats', True)

		# Optionally display all the collected wand detections
		if 'showDetections' in attrs and attrs['showDetections']:
			colours = np.tile(dets_colours, (len(collectedDets[0]) / 5, 1))
			allAttrs = {'x2ds': collectedDets[0], 'x2ds_splits': collectedDets[1],
						'x2ds_colours': colours}
			interface.createChild('collected', 'detections', attrs=allAttrs)
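
The packed detections written above pair one flat array with a boundaries vector (the same x2ds/x2ds_splits layout the op reads at the top). A minimal sketch of how such boundaries behave, assuming Interface.makeSplitBoundaries returns cumulative offsets:

import numpy as np

def makeSplitBoundaries(counts):
	# assumed behaviour: per-camera counts -> cumulative start/end indices
	return np.cumsum([0] + list(counts), dtype=np.int32)

splits = makeSplitBoundaries([3, 0, 2])    # -> [0, 3, 3, 5]
x2ds = np.zeros((5, 2), dtype=np.float32)  # 5 packed detections
for ci, (c0, c1) in enumerate(zip(splits[:-1], splits[1:])):
	cam_dets = x2ds[c0:c1]                 # detections belonging to camera ci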
Example #4
def main():
    grip_dir = os.environ['GRIP_DATA']
    global g_model
    g_model = IO.load(os.path.join(grip_dir, 'aam.new.io'))[1]

    global g_predictor, reference_3d, geo_vs, geo_vts, rect
    rect = None
    pred_fn = os.path.join(grip_dir, 'pred.new.io')
    g_predictor = Face.load_predictor(pred_fn)  #, cutOff=15)
    reference_shape = g_predictor['ref_shape']
    size = reference_shape.shape[0]
    geo_vs = np.zeros((size, 3), dtype=np.float32)
    geo_vs[:size, :2] = reference_shape
    geo_vts = np.zeros((size, 2), dtype=np.float32)
    geo_vts[:size] = reference_shape + 0.5
    geo_ts = np.array([[1, 0, 0, 0], [0, 1, 0, 1000], [0, 0, 1, 0]],
                      dtype=np.float32)
    geo_fs = Face.triangulate_2D(reference_shape)
    geo_bs = []
    for p0, p1, p2 in geo_fs:
        geo_bs.append((p0, p1))
        geo_bs.append((p1, p2))
        geo_bs.append((p2, p0))
    reference_3d = np.zeros((reference_shape.shape[0], 3), dtype=np.float32)
    reference_3d[:, :2] = reference_shape * [100, 100]

    img_vs = np.array([[0, 0, 0], [640, 0, 0], [640, 480, 0], [0, 480, 0]],
                      dtype=np.float32)
    img_vts = np.array([[0, 1], [1, 1], [1, 0], [0, 0]], dtype=np.float32)
    img_fs = np.array([[0, 1, 2, 3]], dtype=np.int32)
    img_ts = np.array([[1, 0, 0, 0], [0, 1, 0, 1000], [0, 0, 1, 0]],
                      dtype=np.float32)

    geo_mesh = GLMeshes(names=['geo_mesh'],
                        verts=[geo_vs],
                        faces=[geo_fs],
                        transforms=[geo_ts],
                        bones=[geo_bs],
                        vts=[geo_vts])
    img_mesh = GLMeshes(names=['img_mesh'],
                        verts=[img_vs],
                        faces=[img_fs],
                        transforms=[img_ts],
                        bones=[None],
                        vts=[img_vts])
    kinect = freenect.init()
    tilt, roll = 0, 0

    if 1:  # query the Kinect accelerometer to estimate tilt and roll
        kdev = freenect.open_device(kinect, 0)
        freenect.set_led(kdev, 0)  # turn off LED
        freenect.set_tilt_degs(kdev, 25)
        kstate = freenect.get_tilt_state(kdev)
        freenect.update_tilt_state(kdev)
        tilt_angle, tilt_status = kstate.tilt_angle, kstate.tilt_status
        ax, ay, az = kstate.accelerometer_x, kstate.accelerometer_y, kstate.accelerometer_z
        #bottom facing down: (85, 743, 369, 52, 0)
        #right side down: (916, 71, 96, 112, 0)
        #front side down: (52, 63, -863, -128, 0)
        freenect.close_device(kdev)
        y_axis = np.array((ax, ay, az), dtype=np.float32)
        y_axis = y_axis / np.linalg.norm(y_axis)
        roll = np.degrees(np.arctan2(ax, ay))
        tilt = -np.degrees(np.arctan2(az, (ax**2 + ay**2)**0.5))

    fovX = 62.0
    pan_tilt_roll = (0, tilt, roll)
    tx_ty_tz = (0, 1000, 6000)
    P = Calibrate.composeP_fromData((fovX,), pan_tilt_roll, tx_ty_tz, 0)

    global g_camera_rays, g_camera_mat
    h, w = 480 // 2, 640 // 2
    coord, pix_coord = make_coords(h, w)
    #P = np.eye(3,4,dtype=np.float32)
    #P[0,0] = P[1,1] = 2.0
    k1, k2 = 0, 0
    g_camera_mat = Calibrate.makeMat(P, (k1, k2), [w, h])
    K, RT, P, ks, T, wh = g_camera_mat
    coord_undist = coord.copy()
    Calibrate.undistort_points_mat(coord.reshape(-1, 2), g_camera_mat,
                                   coord_undist.reshape(-1, 2))
    g_camera_rays = np.dot(coord_undist, RT[:2, :3])  # ray directions (unnormalized)
    g_camera_rays -= np.dot([-K[0, 2], -K[1, 2], K[0, 0]], RT[:3, :3])
    g_camera_rays /= (np.sum(g_camera_rays**2, axis=-1)**0.5).reshape(h, w, 1)  # normalized ray directions
    names = ['kinect']
    vs = [np.zeros((h * w, 3), dtype=np.float32)]
    ts = [np.eye(3, 4, dtype=np.float32)]
    vts = [pix_coord * (1.0 / w, 1.0 / h)]
    faces = [make_faces(h, w)]
    mats = None
    geom_mesh = GLMeshes(names=names,
                         verts=vs,
                         faces=faces,
                         transforms=ts,
                         vts=vts)
    layers = {
        'geom_mesh': geom_mesh,
        'geo_mesh': geo_mesh,
        'img_mesh': img_mesh
    }
    QGLViewer.makeViewer(layers=layers,
                         mats=mats,
                         callback=cb,
                         timeRange=(0, 10000))
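
make_coords and make_faces are helpers defined elsewhere in this module. A rough sketch of what they plausibly return, given how their outputs are used above (an (h, w, 2) coordinate grid plus flat pixel coordinates, and one quad face per grid cell); the normalisation convention here is an assumption:

import numpy as np

def make_coords(h, w):
    # pixel grid: coord in a centred, normalised convention (assumed),
    # pix_coord in plain pixel units for building texture coordinates
    ys, xs = np.mgrid[0:h, 0:w]
    pix_coord = np.dstack((xs, ys)).astype(np.float32).reshape(-1, 2)
    coord = np.dstack(((xs - 0.5 * w) / (0.5 * w),
                       (ys - 0.5 * h) / (0.5 * h))).astype(np.float32)
    return coord, pix_coord

def make_faces(h, w):
    # one quad per grid cell, indexing the h*w vertices row-major
    idx = np.arange(h * w).reshape(h, w)
    quads = np.dstack((idx[:-1, :-1], idx[:-1, 1:], idx[1:, 1:], idx[1:, :-1]))
    return quads.reshape(-1, 4).astype(np.int32)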
Example #5
 for ci, P in enumerate(Ps):  # first pass (loop header reconstructed from the second pass below)
     x2s_which = np.where(
         map(lambda x: x != -1,
             x2s_labels[x2s_splits[ci]:x2s_splits[ci + 1]])
     )[0] + x2s_splits[ci]
     xls = x2s_labels[x2s_which]
     x3s_which = [list(x3s_labels).index(xi) for xi in xls]
     cv2_mat = Calibrate.cv2_solve_camera_from_3d(
         x3s[x3s_which],
         x2s[x2s_which],
         None,
         solve_distortion=True,
         solve_principal_point=False,
         solve_focal_length=True)
     rms = cv2_mat[2]
     print 'camera rms', ci, rms, 'distortion cf', cv2_mat[1], mats[ci][3]
     mats[ci] = Calibrate.makeMat(cv2_mat[0], cv2_mat[1], mats[ci][5])
 if True:  # try to share a single focal length and distortion across all the cameras
     f = np.mean([m[0][0, 0] for m in mats])
     k = np.mean([m[3] for m in mats], axis=0)
     for ci, m in enumerate(mats):
         m[0][0, 0] = m[0][1, 1] = f
         np.dot(m[0], m[1], m[2])  # recompute P = K.RT in place (third arg is numpy's out)
         m[3][:] = k
     ISCV.undistort_points(x2s, 0, 0, float(k[0]), float(k[1]), u2s)
     for ci, P in enumerate(Ps):  # second pass: enforce shared focal and distortion
         x2s_which = np.where(
             map(lambda x: x != -1,
                 x2s_labels[x2s_splits[ci]:x2s_splits[ci + 1]])
         )[0] + x2s_splits[ci]
         xls = x2s_labels[x2s_which]
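
For reference, the mat entries indexed above (m[0], m[1], m[2], m[3], m[5]) follow the layout unpacked in example #4:

K, RT, P, ks, T, wh = mats[0]  # Calibrate.makeMat output, per example #4
f = K[0, 0]                    # focal term, mirrored into K[1, 1] above
k1, k2 = ks                    # the radial distortion pair (m[3])
width_height = wh              # image size (m[5]), fed back into Calibrate.makeMat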
Example #6
def main(x2d_filename, xcp_filename, c3d_filename=None):
    '''Generate a 3D view of an x2d file, using the calibration.'''
    global x2d_frames, mats, Ps, c3d_frames, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, orn_mapper, mar_mapper
    prev_frame = None
    c3d_frames = None
    if c3d_filename is not None:
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames, c3d_fps, c3d_labels = (c3d_dict['frames'], c3d_dict['fps'],
                                           c3d_dict['labels'])
    mats, xcp_data = ViconReader.loadXCP(xcp_filename)
    camera_ids = [int(x['DEVICEID']) for x in xcp_data]
    print 'loading 2d'
    x2d_dict = ViconReader.loadX2D(x2d_filename)
    x2d_frames = x2d_dict['frames']
    cameras_info = ViconReader.extractCameraInfo(x2d_dict)
    print 'num frames', len(x2d_frames)
    Ps = [m[2] / m[0][0, 0] for m in mats]  # projection matrices, normalised by focal length
    track3d = Label.Track3D(mats)

    primitives = QGLViewer.makePrimitives(vertices=[], altVertices=[])
    primitives2D = QGLViewer.makePrimitives2D(([], [0]))

    global g_all_skels, md
    directory = os.path.join(os.environ['GRIP_DATA'], '151110')
    _, orn_skel_dict = IO.load(os.path.join(directory, 'orn.skel'))
    movie_fn = os.path.join(directory, '50_Grip_RoomCont_AA_02.v2.mov')
    md = MovieReader.open_file(movie_fn,
                               audio=True,
                               frame_offset=0,
                               volume_ups=10)

    asf_filename = os.path.join(directory, 'Martha.asf')
    amc_filename = os.path.join(directory, 'Martha.amc')
    asf_dict = ASFReader.read_ASF(asf_filename)
    mar_skel_dict = ASFReader.asfDict_to_skelDict(asf_dict)
    mar_skel_dict['anim_dict'] = ASFReader.read_AMC(amc_filename, asf_dict)
    for k in ('geom_Vs', 'geom_vsplits', 'geom_Gs'):
        mar_skel_dict[k] = orn_skel_dict[k].copy()
    mar_skel_dict['shape_weights'] = orn_skel_dict['shape_weights']
    mar_skel_dict['geom_dict'] = orn_skel_dict['geom_dict']

    orn_vss = ViconReader.loadVSS(os.path.join(directory, 'Orn.vss'))
    orn_vss_chan_mapping = [
        orn_vss['chanNames'].index(n) for n in orn_skel_dict['chanNames']
    ]
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_vss_anim = np.zeros(
        (orn_anim_dict['dofData'].shape[0], orn_vss['numChans']),
        dtype=np.float32)
    orn_vss_anim[:, orn_vss_chan_mapping] = orn_anim_dict['dofData']
    orn_anim_dict['dofData'] = orn_vss_anim
    orn_vss['anim_dict'] = orn_anim_dict
    for x in [
            'geom_dict', 'geom_Vs', 'geom_vsplits', 'geom_Gs', 'shape_weights'
    ]:
        orn_vss[x] = orn_skel_dict[x]
    orn_skel_dict = orn_vss

    g_all_skels = {}
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t = Character.make_geos(
        orn_skel_dict)
    g_all_skels['orn'] = (orn_skel_dict, orn_t)
    orn_skel_dict['chanValues'][:] = 0
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)

    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t = Character.make_geos(
        mar_skel_dict)
    g_all_skels['mar'] = (mar_skel_dict, mar_t)

    #ted_mesh_dict, ted_skel_mesh, ted_geom_mesh = ted_t = Character.make_geos(ted_skel_dict)
    #g_all_skels['ted'] = (ted_skel_dict, ted_t)
    #ted_skel_dict['chanValues'][0] += 1000
    #Character.updatePoseAndMeshes(ted_skel_dict, ted_skel_mesh, ted_geom_mesh)

    mnu = orn_skel_dict['markerNamesUnq']
    mns = orn_skel_dict['markerNames']
    effectorLabels = np.array([mnu.index(n) for n in mns], dtype=np.int32)
    orn_graph = Label.graph_from_skel(orn_skel_dict, mnu)
    boot = -10

    track_orn = Label.TrackModel(orn_skel_dict, effectorLabels, mats)

    #ted = GLSkel(ted_skel_dict['Bs'], ted_skel_dict['Gs']) #, mvs=ted_skel_dict['markerOffsets'], mvis=ted_skel_dict['markerParents'])
    #ted = GLSkeleton(ted_skel_dict['jointNames'],ted_skel_dict['jointParents'], ted_skel_dict['Gs'][:,:,3])
    #ted.setName('ted')
    #ted.color = (1,1,0)
    #orn = GLSkeleton(orn_skel_dict['jointNames'],orn_skel_dict['jointParents'], orn_skel_dict['Gs'][:,:,3])
    #orn.setName('orn')
    #orn.color = (0,1,1)

    #square = GLMeshes(names=['square'],verts=[[[0,0,0],[1000,0,0],[1000,1000,0],[0,1000,0]]],vts=[[[0,0],[1,0],[1,1],[0,1]]],faces=[[[0,1,2,3]]],fts=[[[0,1,2,3]]])
    #square.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))
    #orn_geom_mesh.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))

    P = Calibrate.composeP_fromData((60.8, ), (-51.4, 14.7, 3.2),
                                    (6880, 2860, 5000),
                                    0)  # roughed in camera for 151110
    ks = (0.06, 0.0)
    mat = Calibrate.makeMat(P, ks, (1080, 1920))
    orn_mapper = Opengl.ProjectionMapper(mat)
    orn_mapper.setGLMeshes(orn_geom_mesh)
    orn_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    mar_mapper = Opengl.ProjectionMapper(mat)
    mar_mapper.setGLMeshes(mar_geom_mesh)
    mar_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    global g_screen
    g_screen = Opengl.make_quad_distortion_mesh()

    QGLViewer.makeViewer(
        mat=mat, md=md,
        layers={
            #'ted':ted, 'orn':orn,
            #'ted_skel':ted_skel_mesh, 'ted_geom':ted_geom_mesh,
            #'square':square,
            'orn_skel':orn_skel_mesh, 'orn_geom':orn_geom_mesh,
            'mar_skel':mar_skel_mesh, 'mar_geom':mar_geom_mesh,
        },
        primitives=primitives, primitives2D=primitives2D,
        timeRange=(0, len(x2d_frames) - 1, 4, 25.0),
        callback=intersectRaysCB, mats=mats, camera_ids=camera_ids)
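
As with example #1, a minimal command-line driver (a sketch; the script name and argument handling are assumptions):

if __name__ == '__main__':
    import sys
    # e.g. python view_x2d.py take.x2d take.xcp [take.c3d]
    main(sys.argv[1], sys.argv[2], sys.argv[3] if len(sys.argv) > 3 else None)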
Example #7
def main():
	from UI import QGLViewer
	from UI import GLMeshes, GLPoints3D

	global g_setting_frame
	g_setting_frame = False
	# static data
	global g_webcam, g_md, g_rbfn, g_predictor, g_head_pan_shape, g_head_tilt_shape
	# runtime options and state
	global g_prev_smooth_shape, g_prev_vs, g_hmc_boot, g_neutral_corrective_shape, g_settle, g_head_pan_tilt_roll, g_smooth_pose
	global g_directory, g_TIS_server, g_mode, g_frame

	g_TIS_server = SocketServer.SocketServer()
	g_mode, g_frame = 0,{}

	grip_dir = os.environ['GRIP_DATA']
	g_directory = grip_dir

	g_webcam,g_md = None,None

	g_prev_vs, g_prev_smooth_shape = None,None
	g_hmc_boot = None
	#clear_neutral()
	g_neutral_corrective_shape = IO.load(os.path.join(g_directory,'neutral.out'))[1]
	g_settle = -1
	g_head_pan_tilt_roll = None
	g_smooth_pose = {}

	aam = IO.load(os.path.join(g_directory,'aam.out'))[1]
	if 0: # experimental: derive the head pan/tilt shapes from the AAM modes via an SVD
		svt = np.float32(aam['shapes']).reshape(-1,140)
		svt = np.dot(aam['shapes_u'],aam['shapes_s'].reshape(-1,1)*aam['shapes_vt'])
		svt = aam['shapes_s'].reshape(-1,1)*aam['shapes_vt']
		tmp = svt.reshape(svt.shape[0],-1,2)
		Sx,Sy = tmp[:,:,0],tmp[:,:,1]
		tmp = np.dot(np.dot(Sy.T,np.dot(Sx,Sx.T)),Sy)
		u,s,vt = np.linalg.svd(tmp, full_matrices=False)
		print s
		g_head_pan_shape = np.zeros((svt.shape[1]/2,2),dtype=np.float32)
		g_head_tilt_shape = np.zeros((svt.shape[1]/2,2),dtype=np.float32)
		g_head_pan_shape[:,0] = g_head_tilt_shape[:,1] = vt[0]
		print np.sum(g_head_pan_shape * aam['shapes_vt'][0].reshape(-1,2))
		print np.sum(g_head_tilt_shape * aam['shapes_vt'][1].reshape(-1,2))
	g_head_pan_shape = aam['shapes_vt'][0].reshape(-1,2)
	#g_head_tilt_shape = aam['shapes_vt'][1].reshape(-1,2) # immediately overridden below
	g_head_tilt_shape = g_head_pan_shape[:,::-1]*np.float32([1,-1]) # pan rotated 90 degrees
	print np.sum(g_head_pan_shape*g_head_tilt_shape)
	g_head_pan_shape *= np.linalg.norm(g_head_pan_shape)**-0.5
	g_head_tilt_shape *= np.linalg.norm(g_head_tilt_shape)**-0.5
	# flip signs so the pan/tilt shapes point in the expected directions
	if np.sum(g_head_pan_shape[:,0]) < 1: g_head_pan_shape = -g_head_pan_shape
	if np.sum(g_head_tilt_shape[:,1]) > 1: g_head_tilt_shape = -g_head_tilt_shape
	#print np.sum(g_head_pan_shape * g_head_tilt_shape)
	#print np.dot(g_head_pan_shape[:,0],g_head_tilt_shape[:,1])

	g_predictor = Face.load_predictor(os.path.join(g_directory,'train.out'))
	rbfn_filename = os.path.join(g_directory,'rbfn.out')
	g_rbfn = IO.load(rbfn_filename)[1]
	#g_rbfn = convert_rbfn(rbfn_in_filename)
	#IO.save(rbfn_filename, g_rbfn)

	
	ref_shape = g_predictor['ref_shape']
	cx,cy = np.mean(ref_shape,axis=0)
	vx,vy = (np.var(ref_shape,axis=0)**0.5) * 2.5
	geo_bs = []
	ref_fs = Face.triangulate_2D(ref_shape)
	for p0,p1,p2 in ref_fs:
		geo_bs.append((p0,p1))
		geo_bs.append((p1,p2))
		geo_bs.append((p2,p0))
	geo_vs = np.zeros((len(ref_shape),3), dtype=np.float32)
	geo_fs = []
	geo_ts = np.float32([[1,0,0,0],[0,1,0,1000],[0,0,1,0]])
	geo_vts = np.zeros_like(ref_shape)
	
	img_vs = np.float32([[-1000,-1000,0],[1000,-1000,0],[1000,1000,0],[-1000,1000,0]])
	img_fs = np.int32([[0,1,2,3]])
	img_ts = np.float32([[1,0,0,0],[0,1,0,1000],[0,0,1,0]])
	img_vts = np.float32([[0,1],[1,1],[1,0],[0,0]])
	markup_mesh = GLPoints3D(vertices=geo_vs, edges=np.int32(geo_bs), names=[], colour=[0,1,0,1],edgeColour=[1,1,1,1])
	geo_mesh = GLMeshes(names=['geo_mesh'],verts=[geo_vs],faces=[geo_fs],transforms=[geo_ts],bones=[geo_bs], vts=[geo_vts], colour=[1,0,0,1])
	image_mesh = GLMeshes(names=['image_mesh'],verts=[img_vs],faces=[img_fs],transforms=[img_ts],vts=[img_vts])

	global g_bs_vs, g_bs_shape_mat, g_bs_fs, g_bs_vts, g_bs_shape_mat_T
	bs_dict = IO.load(os.path.join(g_directory,'harpy_ma.out'))[1]['blendShapes']['Harpy_cFace_GEOShape']
	obj_scale = 10.0
	g_bs_vs = np.float32(bs_dict['vs']*obj_scale)
	bs_dict['pts'] = [b*obj_scale for b in bs_dict['pts']]
	g_bs_fs = bs_dict['fs'] # warning: mix of quads and triangles :-(
	assert bs_dict['vts'].keys() == range(len(bs_dict['vts'].keys()))
	g_bs_vts = bs_dict['vts'].values()
	g_bs_ts = np.float32([[1,0,0,800],[0,1,0,-600],[0,0,1,300]])
	bs_mesh = GLMeshes(names=['bs_mesh'],verts=[g_bs_vs],faces=[g_bs_fs],transforms=[g_bs_ts],vts=[g_bs_vts],visible=False)

	rbfn_groups, rbfn_slider_splits, rbfn_slider_names, rbfn_marker_names = extract_groups(g_rbfn)
	slider_names = [(x[8:-2]+'.translateY' if x.startswith('get_ty') else x) for x in bs_dict['wt_names']]
	try:
		slider_order = [slider_names.index(x) for x in rbfn_slider_names]
	except Exception as e:
		print 'error',e
		slider_order = []
	g_bs_shape_mat = bs_dict['matrix'] = np.zeros((len(bs_dict['pts']), len(bs_dict['vs']), 3),dtype=np.float32)
	for m,ct,pt in zip(g_bs_shape_mat,bs_dict['cts'],bs_dict['pts']): m[ct] = pt
	g_bs_shape_mat = g_bs_shape_mat[slider_order]
	g_bs_shape_mat_T = g_bs_shape_mat.transpose(1,2,0).copy()

	layers = {'image_mesh':image_mesh,'geo_mesh':geo_mesh,'bs_mesh':bs_mesh,'markup_mesh':markup_mesh}
	app,win = QGLViewer.makeApp()
	outliner = win.qoutliner
	#for gi,geo in enumerate(layers.keys()): outliner.addItem(geo, data='_OBJ_'+geo, index=gi)

	State.setKey('ui',{'type':'ui','attrs':{\
		'harpy_xoffset':300.0,'show_harpy':True,'rotate':0,'mirroring':False,'unreal':True,'streaming_TIS':False,\
		'using_webcam':False,'HMC_mode':True,'booting':True,'filtering':True,'setting_neutral':True,'debugging':False, \
		'webcam_index':0,'cam_offset':700,'cam_fps':50,'cam_width':1280,'cam_height':720, 'movie_filename':''}})
	if True: # running on deployed face machine at 720p50
		State.setKey('/root/ui',{'type':'ui','attrs':{\
			'harpy_xoffset':300.0,'show_harpy':False,'rotate':1,'mirroring':False,'unreal':True,'streaming_TIS':False,\
			'using_webcam':True,'HMC_mode':True,'booting':True,'filtering':True,'setting_neutral':True,'debugging':False, \
			'webcam_index':0,'cam_offset':700,'cam_fps':50,'cam_width':1280,'cam_height':720, 'movie_filename':''}})
	win.setFields('ui',     [
		('show_harpy',      'show_harpy','Whether to display the Harpy','bool', False),
		('harpy_xoffset',   'xoffset', 'Pixels to offset Harpy to right', 'float', 300.0),
		('rotate',          'rotation','Rotate image 0=up,1=left,2=down,3=right,-1=any angle','int', 0),
		('mirroring',       'mirror',  'Show reversed',                 'bool', False),
		('unreal',          'unreal',  'Whether to connect to unreal',  'bool', True),
		#('streaming_TIS',   'streaming_TIS',  'Whether currently streaming',   'bool', False),
		('using_webcam',    'webcam',  'Whether using the webcam',      'bool', False),
		('HMC_mode',        'HMC_mode','Boot every frame',              'bool', True),
		('booting',         'boot',    'Boot at next chance',           'bool', True),
		('filtering',       'filter',  'Whether to filter noise',       'bool', True),
		('setting_neutral', 'neutral', 'Set neutral at next chance',    'bool', False),
		('debugging',       'debug',   'Show rbfn input for debugging', 'bool', False),
		('webcam_index',    'camindex', 'The index of the webcam',      'int',  0),
		('cam_offset',      'camoffset','The offset of the webcam',     'int',  700),
		('cam_fps',         'fps',      'The frame rate of the webcam', 'int',  50),
		('cam_width',       'width',    'The width of the webcam image', 'int',  1280),
		('cam_height',      'height',   'The height of the webcam image', 'int',  720),
		('movie_filename',  'movie',   'The filename of the movie', 'string',  ''),
		])
	slider_names = sorted(g_rbfn['slider_names'])
	win.setFields('sliders', [(sn,sn,'Slider %d'%si,'float',0.0) for si,sn in enumerate(slider_names)])
	State.setKey('/root/sliders', {'type':'sliders','attrs':{sn:0.0 for sn in slider_names}})
	outliner.set_root('/root')
	#outliner.addItem('sliders', data='sliders', index=1)
	win.outliner.raise_()
	#win.select('ui')
	QApp.app.dirtyCB = dirty_cb
	QApp.app.addMenuItem({'menu':'&File','item':'Import &movie','tip':'Import a movie file','cmd':import_movie})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrain rbfn','tip':'Train the rbfn','cmd':retrain_RBFN})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrain rbfn (no linear)','tip':'Train the rbfn with no linear part','cmd':retrain_RBFN_no_linear})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrack refresh rbfn','tip':'Refresh the rbfn','cmd':retrack_refresh_rbfn})
	QApp.app.addMenuItem({'menu':'&Edit','item':'Retrack remap rbfn','tip':'Rebuild the rbfn','cmd':retrack_remap_rbfn})
	QApp.app.addMenuItem({'menu':'&File','item':'Export rbfn','tip':'Export the rbfn','cmd':export_rbfn})
	State.clearUndoStack()
	QGLViewer.makeViewer(appName='StreamVideoTrack', timeRange=(0,100),
		callback=setFrame_cb, keyCallback=keypress_cb, layers=layers,
		mats=[Calibrate.makeMat(Calibrate.composeRT(np.eye(3)*[10,10,1],[0,1000,6000],1000),[0,0],[1920,1080])],
		camera_ids=['RBFN'])

	# Ensure the server has stopped when program terminates
	g_TIS_server.Stop()
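
makeViewer presumably blocks until the viewer exits, so Stop() only runs on a clean shutdown. A try/finally variant (a sketch, reusing the calls above) would also cover the case where the viewer raises:

	try:
		QGLViewer.makeViewer(appName='StreamVideoTrack', timeRange=(0,100),
			callback=setFrame_cb, keyCallback=keypress_cb, layers=layers)
	finally:
		g_TIS_server.Stop() # runs even if the viewer raises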