Example 1
def setFrame(fi):
    QGLViewer.timeline.frameStep = 1
    drawing_skel2 = True
    global animDict
    dofs = animDict['dofData'][(fi - animDict['frameNumbers'][0]) %
                               len(animDict['frameNumbers'])].copy()
    #dofs[[2,5]] = dofData[0,[2,5]]
    Character.pose_skeleton(skelDict['Gs'], skelDict, dofs)
    QGLViewer.skel.setPose(skelDict['Gs'])
    if drawing_skel2:
        dofs = skelDict2['dofData'][(fi - skelDict2['frameNumbers'][0]) %
                                    len(skelDict2['frameNumbers'])].copy()
        Character.pose_skeleton(skelDict2['Gs'], skelDict2, dofs)
        QGLViewer.skel2.setPose(skelDict2['Gs'])  # pose skel2 from skelDict2 (was skelDict['Gs'])
    QGLViewer.view.updateGL()
Example 2
def scoreIK(skelDict, chanValues, effectorData, effectorTargets, rootMat=None):
    """
	Pose the skeleton with the given channel values and score it against the effector targets.

	Args:
		skelDict (GskelDict): The Skeleton to process
		chanValues (float[]): Channel values to pose the skeleton
		effectorData : (effectorJoints, effectorOffsets, effectorWeights, ...)
		effectorTargets : targets for the effectors
		rootMat (float[3][4]): reference frame of the Skeleton. Default = None

	Returns:
		float: normalised (root-mean-square style) effector error for the resulting pose.

	Requires:
		Character.pose_skeleton
		ISCV.score_effectors
	"""
    Character.pose_skeleton(skelDict['Gs'], skelDict, chanValues, rootMat)
    return (
        ISCV.score_effectors(skelDict['Gs'], effectorData[0], effectorData[1],
                             effectorData[2], effectorTargets) /
        np.sum(effectorData[1]))**0.5
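A minimal usage sketch (hedged): the SolveIK.make_effectorData / SolveIK.scoreIK call pattern and the p_o_w tuple are copied from the retargeting example further down; the import path and the one-effector-per-joint, self-targeting setup are illustrative assumptions.

import numpy as np
from GCore import SolveIK  # import path assumed, mirroring 'from GCore import Character' elsewhere in these examples

def score_current_pose(skelDict):
    # one position effector per joint, targeting the joint positions of the current pose
    numJoints = skelDict['numJoints']
    p_o_w = (np.arange(numJoints, dtype=np.int32),        # effector parent joints
             np.zeros((numJoints, 3), dtype=np.float32),  # effector offsets
             np.ones(numJoints, dtype=np.float32))        # effector weights
    effectorData = SolveIK.make_effectorData(skelDict, p_o_w=p_o_w)
    effectorTargets = np.zeros_like(skelDict['Gs'])
    effectorTargets[:, :, 3] = skelDict['Gs'][:, :, 3]
    return SolveIK.scoreIK(skelDict, skelDict['chanValues'], effectorData, effectorTargets)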
Example 3
    def cook(self, location, interface, attrs):
        skelDict = interface.attr('skelDict')
        if skelDict is None: return

        animationLocation = attrs['animation']
        if not animationLocation: animationLocation = location
        animDict = interface.attr('animDict', atLocation=animationLocation)
        if animDict is not None:
            # Check if the source animation has indicated that we should be using a particular frame (e.g. due to offsets and step size)
            frame = interface.attr('frame', atLocation=animationLocation)
            if frame is None: frame = interface.frame()

            animData = animDict['dofData'][frame]
            if skelDict['numChans'] != len(animData):
                fle = skelDict['chanValues'].copy()
                animSkelDict = interface.attr('skelDict',
                                              atLocation=animationLocation)
                chanIdxs = [
                    skelDict['chanNames'].index(cn)
                    for ci, cn in enumerate(animSkelDict['chanNames'])
                    if cn in skelDict['chanNames']
                ]
                fle[chanIdxs] = animData
                animData = fle

            Character.updatePose(skelDict, animData)

        else:
            rootMat = skelDict['rootMat'] if 'rootMat' in skelDict else None
            Character.updatePose(skelDict, x_mat=rootMat)

        interface.setAttr('Gs',
                          skelDict['Gs'],
                          atLocation=location + '/meshes')
        if 'geom_dict' in skelDict:
            interface.setAttr('geom_Gs',
                              skelDict['geom_Gs'],
                              atLocation=location + '/meshes')
            interface.setAttr('geom_Vs',
                              skelDict['geom_Vs'],
                              atLocation=location + '/meshes')
Example 4
def set_frame_CB(fi):
    view = QApp.view()
    skel_mesh = view.getLayer('skel')
    global g_anim_dict, g_skel_dict
    t = g_anim_dict['t']
    r = g_anim_dict['r']
    chan_values = g_skel_dict['chanValues']
    jcs = g_skel_dict['jointChans']
    jcss = g_skel_dict['jointChanSplits']
    num_joints = g_skel_dict['numJoints']
    anim = []
    time_sec = fi / 120.  # TODO time range, fps
    for ji in range(num_joints):
        for ti in range(jcss[2 * ji], jcss[2 * ji + 1]):
            anim.append(sample(t[ji][jcs[ti]], time_sec) * 10.0)
        for ri in range(jcss[2 * ji + 1], jcss[2 * ji + 2]):
            anim.append(np.radians(sample(r[ji][jcs[ri] - 3], time_sec)))
    #print ji,anim[:10]
    g_skel_dict['chanValues'][:] = anim
    from GCore import Character
    Character.updatePoseAndMeshes(g_skel_dict, skel_mesh, None)
    #print g_skel_dict['Gs'][:3]
    view.updateGL()
Example 5
    def cook(self, location, interface, attrs):
        if self.skelDict is None or self.mesh_dict is None:
            skelFilename = self.resolvePath(attrs['skelFilename'])

            # Use the filename if given to load the skeleton dictionary, otherwise use the cooked skeleton
            if skelFilename:
                try:
                    _, self.skelDict = IO.load(skelFilename)

                except Exception as e:
                    self.logger.error(
                        'Could not open skeleton: \'{}\''.format(skelFilename))
                    return
            else:
                self.skelDict = interface.attr('skelDict')

            if self.skelDict is None: return
            rootMat = self.skelDict['rootMat'] if 'rootMat' in self.skelDict else None
            # TODO: This should happen in the render callback
            self.mesh_dict, self.skel_mesh, self.geom_mesh = Character.make_geos(
                self.skelDict, rootMat)

        # Test updating on the fly (TEMP)
        rootMat = self.skelDict['rootMat'] if 'rootMat' in self.skelDict else None
        Character.updatePose(self.skelDict, x_mat=rootMat)

        interface.setAttr('skelDict', self.skelDict)
        interface.setAttr('meshDict', self.mesh_dict)

        charAttrs = {
            'skeleton': self.skel_mesh,
            'geometry': self.geom_mesh,
            'geo_colour': eval(attrs['geoColour'])
        }
        interface.createChild('meshes', 'character', attrs=charAttrs)
Example 6
def skeleton_marker_positions(skelDict,
                              rootMat,
                              chanValues,
                              effectorLabels,
                              effectorData,
                              markerWeights=None):
    """
	Based on the pose implied by the chanValues and rootMat, compute the 3D world-space
	positions of the markers.
	
	Multiple effectors may determine the position of the marker. effectorLabels provides this mapping.
	
	The weights for the markers, if any, are set by markerWeights.
	
	Args:
		skelDict (GskelDict): the skeleton
		rootMat (float[3][4]): reference frame of the Skeleton.
		chanValues (float[]): List of channel values to pose the skeleton
		effectorLabels : the marker that each effector determines
		effectorData : (effectorJoints, effectorOffsets, ...)
		markerWeights : the weight that each effector has on its marker
		
	Returns:
		float[][3]: 3D world-space positions of the markers in this pose.
		int[]: Labels for those marker positions.
		
	Requires:
		Character.pose_skeleton
		ISCV.marker_positions
		
	"""
    Character.pose_skeleton(skelDict['Gs'], skelDict, chanValues, rootMat)
    labels = np.unique(effectorLabels)
    els2 = np.int32([list(labels).index(x) for x in effectorLabels])
    x3ds = ISCV.marker_positions(skelDict['Gs'], effectorData[0],
                                 effectorData[1], els2, markerWeights)
    return x3ds, labels
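A hedged sketch of calling this on a skeleton loaded by loadVSS: the effectorLabels construction mirrors the viewer example below, while the SolveIK.make_effectorData call with a p_o_w tuple (and the import path) are assumptions borrowed from the retargeting example.

import numpy as np
from GCore import SolveIK  # import path assumed

def marker_positions_for_pose(skel_dict):
    mnu, mns = skel_dict['markerNamesUnq'], skel_dict['markerNames']
    effectorLabels = np.array([mnu.index(n) for n in mns], dtype=np.int32)
    p_o_w = (skel_dict['markerParents'], skel_dict['markerOffsets'], skel_dict['markerWeights'])
    effectorData = SolveIK.make_effectorData(skel_dict, p_o_w=p_o_w)
    return skeleton_marker_positions(skel_dict, skel_dict['rootMat'],
                                     skel_dict['chanValues'], effectorLabels,
                                     effectorData, skel_dict['markerWeights'])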
Example 7
def loadVSS(fn):
	'''Decode a Vicon skeleton file (VST-format XML): a VSK is the labelling skeleton, a VSS is the solving skeleton.'''
	import xml.etree.cElementTree as ET
	import numpy as np
	dom = ET.parse(fn)
	parameters = dom.findall('Parameters')[0]
	params = dict([(p.get('NAME'),p.get('VALUE')) for p in parameters])
	sticks = dom.findall('MarkerSet')[0].find('Sticks')
	sticksPairs = [(x.get('MARKER1'),x.get('MARKER2')) for x in sticks]
	sticksColour= [np.fromstring(x.get('RGB1', '255 255 255'), dtype=np.uint8, sep=' ') for x in sticks]
	hasTargetSet = True
	try: markers = dom.findall('TargetSet')[0].find('Targets')
	except: markers = dom.findall('MarkerSet')[0].find('Markers'); hasTargetSet = False
	markerOffsets = [x.get('POSITION').split() for x in markers]
	def ev(x,params):
		for k,v in params.items(): x = x.replace(k,v)
		return float(x) # eval(x)
	markerOffsets = [[ev(x,params) for x in mp] for mp in markerOffsets]
	markerColour= [np.fromstring(col, dtype=np.uint8, sep=' ') for col in \
						[x.get('MARKER', x.get('RGB')) for x in dom.findall('MarkerSet')[0].find('Markers')]]
	colouredMarkers = [x.get('MARKER', x.get('NAME')) for x in dom.findall('MarkerSet')[0].find('Markers')]
	markerNames = [x.get('MARKER', x.get('NAME')) for x in markers]
	markerWeights = [float(x.get('WEIGHT')) if hasTargetSet else 1.0 for x in markers]
	markerParents = [x.get('SEGMENT') for x in markers]
	skeleton = dom.findall('Skeleton')[0]
	# skeleton is defined as a tree of Segments
	# Segment contains Joint and Segment
	# Joint is JointDummy(0)/JointHinge(1)/JointHardySpicer(2)/JointBall(3)/JointFree(6), containing JointTemplate
	def ap(skeleton, parent, skel):
		for seg in skeleton:
			if seg.tag == 'Segment':
				skel.append([seg.get('NAME'),parent,seg.attrib])
				ap(seg, len(skel)-1, skel)
			else:
				skel[parent].extend([seg.tag,seg.attrib,{} if len(seg) == 0 else seg[0].attrib])
		return skel
	# recursively parse the skeleton
	root = ap(skeleton, -1, [])
	assert(len(markerParents) == len(markerOffsets))
	def cqToR(rs, R):
		'''Given a compressed quaternion, form a 3x3 rotation matrix.'''
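		# rs is an exponential-map (axis-angle) vector: its length is the rotation angle
		# in radians and its direction is the rotation axis. It is converted to a unit
		# quaternion (x, y, z, w) and then expanded into the 3x3 matrix written into R;
		# the scale term sin(angle/2)/angle falls back to 0.5 near angle == 0.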
		angle = np.dot(rs,rs)**0.5
		scale = (np.sin(angle*0.5)/angle if angle > 1e-8 else 0.5)
		q = np.array([rs[0]*scale,rs[1]*scale,rs[2]*scale,np.cos(angle*0.5)], dtype=np.float32)
		q = np.outer(q, q)*2
		R[:3,:3] = [
			[1.0-q[1, 1]-q[2, 2],     q[0, 1]-q[2, 3],     q[0, 2]+q[1, 3]],
			[    q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2],     q[1, 2]-q[0, 3]],
			[    q[0, 2]-q[1, 3],     q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1]]]
	def float3(x): return np.array([ev(v, params) for v in x.split()], dtype=np.float32)
	def mats(x):
		preT = x.get('PRE-POSITION', '0 0 0')
		postT = x.get('POST-POSITION', '0 0 0')
		preR = x.get('PRE-ORIENTATION', '0 0 0')
		postR = x.get('POST-ORIENTATION', '0 0 0')
		pre = np.zeros((3,4),dtype=np.float32)
		post = np.zeros((3,4),dtype=np.float32)
		pre[:,3] = float3(preT)
		post[:,3] = float3(postT)
		cqToR(float3(preR), pre[:3,:3])
		cqToR(float3(postR), post[:3,:3])
		return pre,post
	name = fn.rpartition('/')[2].partition('.')[0]
	numBones = len(root)
	jointNames = [r[0] for r in root]
	markerParents = np.array([jointNames.index(mp) for mp in markerParents],dtype=np.int32)
	jointNames[0] = 'root' # !!!! WARNING !!!!
	jointParents = [r[1] for r in root]
	jointData = [mats(r[4]) for r in root]
	jointTypes = [r[3] for r in root] # JointDummy(0)/JointHinge(1)/JointHardySpicer(2)/JointBall(3)/JointFree(6)
	#jointTemplates = [mats(r[5]) for r in root] # JointTemplate ... contains the same data as jointTypes
	jointAxes = [r[4].get('AXIS',r[4].get('AXIS-PAIR',r[4].get('EULER-ORDER','XYZ'))) for r in root] # order
	jointTs = [r[4].get('T',None) for r in root]
	Gs = np.zeros((numBones,3,4),dtype=np.float32) # GLOBAL mats
	Ls = np.zeros((numBones,3,4),dtype=np.float32) # LOCAL mats
	Bs = np.zeros((numBones,3),dtype=np.float32) # BONES
	for ji,pi in enumerate(jointParents):
		if pi == -1: Ls[ji] = jointData[ji][0]
		else: np.dot(jointData[pi][1][:,:3],jointData[ji][0],out=Ls[ji]); Ls[ji,:,3] += jointData[pi][1][:,3]
	dofNames = []
	jointChans = [] # tx=0,ty,tz,rx,ry,rz
	jointChanSplits = [0]
	# TODO: locked channels
	for ji,(jt,T) in enumerate(zip(jointTypes,jointTs)):
		jointChanSplits.append(len(jointChans))
		if jt == 'JointDummy': assert(T is None)
		elif jt == 'JointHinge':
			assert(T == '* ')
			jointChans.append(jointAxes[ji].split().index('1')+3)
		elif jt == 'JointHardySpicer':
			assert(T == '* * ')
			ja = jointAxes[ji].split()
			jointChans.append(ja.index('1',3))
			jointChans.append(ja.index('1')+3)
		elif jt == 'JointBall':
			assert(T == '* * * ')
			ja = jointAxes[ji]
			jointChans.append(ord(ja[0])-ord('X')+3)
			jointChans.append(ord(ja[1])-ord('X')+3)
			jointChans.append(ord(ja[2])-ord('X')+3)
		elif jt == 'JointFree':
			assert(T == '* * * * * * ' or T is None) # version 1 of the file apparently doesn't fill this!
			ja = jointAxes[ji]
			jointChans.append(0)
			jointChans.append(1)
			jointChans.append(2)
			jointChanSplits[-1] = len(jointChans)
			jointChans.append(ord(ja[0])-ord('X')+3)
			jointChans.append(ord(ja[1])-ord('X')+3)
			jointChans.append(ord(ja[2])-ord('X')+3)
		for jc in jointChans[jointChanSplits[-2]:]:
			dofNames.append(jointNames[ji]+':'+'tx ty tz rx ry rz'.split()[jc])
		jointChanSplits.append(len(jointChans))
	numDofs = len(dofNames)
	# chanValues / rootMat; Gs is filled by Character.pose_skeleton at the end
	chanValues = np.zeros(numDofs,dtype=np.float32)
	rootMat = np.eye(3, 4, dtype=np.float32)

	# fill Bs; TODO add dummy joints to store the extra bones (where multiple joints have the same parent)
	for ji,pi in enumerate(jointParents):
		if pi != -1: Bs[pi] = Ls[ji,:,3]
	Bs[np.where(Bs*Bs<0.01)] = 0 # zero out bones < 0.1mm

	# TODO: compare skeleton with ASF exported version
	skel_dict = {
			'markerOffsets'  : np.array(markerOffsets, dtype=np.float32),
			'markerParents'  : markerParents,
			'markerNames'    : markerNames,
			'markerNamesUnq' : colouredMarkers,
			'markerColour'   : markerColour,
			'markerWeights'  : np.array(markerWeights,dtype=np.float32),
			'numMarkers'     : len(markerNames),
			'sticks'         : sticksPairs,
			'sticksColour'   : sticksColour,
			'name'           : str(name),
			'numJoints'      : int(numBones),
			'jointNames'     : jointNames,  # list of strings
			'jointIndex'     : dict([(k,v) for v,k in enumerate(jointNames)]), # dict of string:int
			'jointParents'   : np.array(jointParents,dtype=np.int32),
			'jointChans'     : np.array(jointChans,dtype=np.int32), # 0 to 5 : tx,ty,tz,rx,ry,rz
			'jointChanSplits': np.array(jointChanSplits,dtype=np.int32),
			'chanNames'      : dofNames,   # list of strings
			'chanValues'     : np.zeros(numDofs,dtype=np.float32),
			'numChans'       : int(numDofs),
			'Bs'             : np.array(Bs, dtype=np.float32),
			'Ls'             : np.array(Ls, dtype=np.float32),
			'Gs'             : np.array(Gs, dtype=np.float32),
			'rootMat'        : rootMat,
			}
	Character.pose_skeleton(skel_dict['Gs'], skel_dict)
	return skel_dict
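To make the channel layout concrete, here is a small sketch (assuming a skel_dict returned by loadVSS) that lists each joint's degrees of freedom; the split convention - translations in jointChans[splits[2*ji]:splits[2*ji+1]], rotations in jointChans[splits[2*ji+1]:splits[2*ji+2]] - is the same one used by set_frame_CB above.

def print_joint_dofs(skel_dict):
	names = 'tx ty tz rx ry rz'.split()
	chans = skel_dict['jointChans']
	splits = skel_dict['jointChanSplits']
	for ji, jn in enumerate(skel_dict['jointNames']):
		ts = [names[c] for c in chans[splits[2 * ji]:splits[2 * ji + 1]]]
		rs = [names[c] for c in chans[splits[2 * ji + 1]:splits[2 * ji + 2]]]
		print('%s: translations=%s rotations=%s' % (jn, ts, rs))

#print_joint_dofs(loadVSS('subject.vss'))  # hypothetical filename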
Example 8
def main(x2d_filename, xcp_filename, c3d_filename=None):
    '''Generate a 3D view of an x2d file, using the calibration.'''
    global x2d_frames, mats, Ps, c3d_frames, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, orn_mapper, mar_mapper
    prev_frame = None
    c3d_frames = None
    if c3d_filename != None:
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict[
            'fps'], c3d_dict['labels']
    mats, xcp_data = ViconReader.loadXCP(xcp_filename)
    camera_ids = [int(x['DEVICEID']) for x in xcp_data]
    print 'loading 2d'
    x2d_dict = ViconReader.loadX2D(x2d_filename)
    x2d_frames = x2d_dict['frames']
    cameras_info = ViconReader.extractCameraInfo(x2d_dict)
    print 'num frames', len(x2d_frames)
    Ps = [m[2] / (m[0][0, 0]) for m in mats]
    track3d = Label.Track3D(mats)

    primitives = QGLViewer.makePrimitives(vertices=[], altVertices=[])
    primitives2D = QGLViewer.makePrimitives2D(([], [0]))

    global g_all_skels, md
    directory = os.path.join(os.environ['GRIP_DATA'], '151110')
    _, orn_skel_dict = IO.load(os.path.join(directory, 'orn.skel'))
    movie_fn = os.path.join(directory, '50_Grip_RoomCont_AA_02.v2.mov')
    md = MovieReader.open_file(movie_fn,
                               audio=True,
                               frame_offset=0,
                               volume_ups=10)

    asf_filename = os.path.join(directory, 'Martha.asf')
    amc_filename = os.path.join(directory, 'Martha.amc')
    asf_dict = ASFReader.read_ASF(asf_filename)
    mar_skel_dict = ASFReader.asfDict_to_skelDict(asf_dict)
    mar_skel_dict['anim_dict'] = ASFReader.read_AMC(amc_filename, asf_dict)
    for k in ('geom_Vs', 'geom_vsplits', 'geom_Gs'):
        mar_skel_dict[k] = orn_skel_dict[k].copy()
    mar_skel_dict['shape_weights'] = orn_skel_dict['shape_weights']
    mar_skel_dict['geom_dict'] = orn_skel_dict['geom_dict']

    orn_vss = ViconReader.loadVSS(os.path.join(directory, 'Orn.vss'))
    orn_vss_chan_mapping = [
        orn_vss['chanNames'].index(n) for n in orn_skel_dict['chanNames']
    ]
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_vss_anim = np.zeros(
        (orn_anim_dict['dofData'].shape[0], orn_vss['numChans']),
        dtype=np.float32)
    orn_vss_anim[:, orn_vss_chan_mapping] = orn_anim_dict['dofData']
    orn_anim_dict['dofData'] = orn_vss_anim
    orn_vss['anim_dict'] = orn_anim_dict
    for x in [
            'geom_dict', 'geom_Vs', 'geom_vsplits', 'geom_Gs', 'shape_weights'
    ]:
        orn_vss[x] = orn_skel_dict[x]
    orn_skel_dict = orn_vss

    g_all_skels = {}
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t = Character.make_geos(
        orn_skel_dict)
    g_all_skels['orn'] = (orn_skel_dict, orn_t)
    orn_skel_dict['chanValues'][:] = 0
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)

    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t = Character.make_geos(
        mar_skel_dict)
    g_all_skels['mar'] = (mar_skel_dict, mar_t)

    #ted_mesh_dict, ted_skel_mesh, ted_geom_mesh = ted_t = Character.make_geos(ted_skel_dict)
    #g_all_skels['ted'] = (ted_skel_dict, ted_t)
    #ted_skel_dict['chanValues'][0] += 1000
    #Character.updatePoseAndMeshes(ted_skel_dict, ted_skel_mesh, ted_geom_mesh)

    mnu = orn_skel_dict['markerNamesUnq']
    mns = orn_skel_dict['markerNames']
    effectorLabels = np.array([mnu.index(n) for n in mns], dtype=np.int32)
    orn_graph = Label.graph_from_skel(orn_skel_dict, mnu)
    boot = -10

    track_orn = Label.TrackModel(orn_skel_dict, effectorLabels, mats)

    #ted = GLSkel(ted_skel_dict['Bs'], ted_skel_dict['Gs']) #, mvs=ted_skel_dict['markerOffsets'], mvis=ted_skel_dict['markerParents'])
    #ted = GLSkeleton(ted_skel_dict['jointNames'],ted_skel_dict['jointParents'], ted_skel_dict['Gs'][:,:,3])
    #ted.setName('ted')
    #ted.color = (1,1,0)
    #orn = GLSkeleton(orn_skel_dict['jointNames'],orn_skel_dict['jointParents'], orn_skel_dict['Gs'][:,:,3])
    #orn.setName('orn')
    #orn.color = (0,1,1)

    #square = GLMeshes(names=['square'],verts=[[[0,0,0],[1000,0,0],[1000,1000,0],[0,1000,0]]],vts=[[[0,0],[1,0],[1,1],[0,1]]],faces=[[[0,1,2,3]]],fts=[[[0,1,2,3]]])
    #square.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))
    #orn_geom_mesh.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))

    P = Calibrate.composeP_fromData((60.8, ), (-51.4, 14.7, 3.2),
                                    (6880, 2860, 5000),
                                    0)  # roughed in camera for 151110
    ks = (0.06, 0.0)
    mat = Calibrate.makeMat(P, ks, (1080, 1920))
    orn_mapper = Opengl.ProjectionMapper(mat)
    orn_mapper.setGLMeshes(orn_geom_mesh)
    orn_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    mar_mapper = Opengl.ProjectionMapper(mat)
    mar_mapper.setGLMeshes(mar_geom_mesh)
    mar_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    global g_screen
    g_screen = Opengl.make_quad_distortion_mesh()

    QGLViewer.makeViewer(mat=mat, md=md,
                         layers={
                             #'ted': ted, 'orn': orn,
                             #'ted_skel': ted_skel_mesh, 'ted_geom': ted_geom_mesh,
                             #'square': square,
                             'orn_skel': orn_skel_mesh, 'orn_geom': orn_geom_mesh,
                             'mar_skel': mar_skel_mesh, 'mar_geom': mar_geom_mesh,
                         },
                         primitives=primitives, primitives2D=primitives2D,
                         timeRange=(0, len(x2d_frames) - 1, 4, 25.0),
                         callback=intersectRaysCB, mats=mats, camera_ids=camera_ids)
Example 9
def intersectRaysCB(fi):
    global x2d_frames, mats, Ps, c3d_frames, view, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, g_all_skels, md, orn_mapper, mar_mapper
    skipping = prev_frame is None or np.abs(fi - prev_frame) > 10
    prev_frame = fi
    view = QApp.view()
    points, altpoints = primitives
    g2d = primitives2D[0]
    frame = x2d_frames[fi]
    x2ds_data, x2ds_splits = ViconReader.frameCentroidsToDets(frame, mats)
    g2d.setData(x2ds_data, x2ds_splits)
    if skipping:
        x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
        #trackGraph = Label.TrackGraph()
        boot = -10
    else:
        x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
    if False:
        boot = boot + 1
        if boot == 0:
            x2d_threshold_hash = 0.01
            penalty = 10.0  # the penalty for unlabelled points. this number should be about 10. to force more complete labellings, set it higher.
            maxHyps = 500  # the number of hypotheses to maintain.
            print "booting:"
            numLabels = len(orn_graph[0])
            l2x = -np.ones(numLabels, dtype=np.int32)
            label_score = ISCV.label_from_graph(x3ds, orn_graph[0],
                                                orn_graph[1], orn_graph[2],
                                                orn_graph[3], maxHyps, penalty,
                                                l2x)
            clouds = ISCV.HashCloud2DList(x2ds_data, x2ds_splits,
                                          x2d_threshold_hash)
            which = np.array(np.where(l2x != -1)[0], dtype=np.int32)
            pras_score, x2d_labels, vels = Label.project_assign(
                clouds,
                x3ds[l2x[which]],
                which,
                Ps,
                x2d_threshold=x2d_threshold_hash)
            print fi, label_score, pras_score
            labelled_x3ds = x3ds[l2x[which]]
            print track_orn.bootPose(x2ds_data, x2ds_splits, x2d_labels)
        if boot > 0:
            track_orn.push(x2ds_data, x2ds_splits, its=4)
    #x3ds,x2ds_labels = Recon.intersect_rays(x2ds_data, x2ds_splits, Ps, mats, seed_x3ds = None)
    points.setData(x3ds)
    if c3d_frames != None:
        c3ds = c3d_frames[(fi - 832) / 2]
        true_labels = np.array(np.where(c3ds[:, 3] == 0)[0], dtype=np.int32)
        x3ds_true = c3ds[true_labels, :3]
        altpoints.setData(x3ds_true)

    ci = view.cameraIndex() - 1
    if True:  #ci == -1:
        MovieReader.readFrame(md, seekFrame=max((fi - 14) / 4, 0))
        QApp.app.refreshImageData()
    (orn_skel_dict, orn_t) = g_all_skels['orn']
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_skel_dict['chanValues'][:] = orn_anim_dict['dofData'][fi]
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)
    (mar_skel_dict, mar_t) = g_all_skels['mar']
    mar_anim_dict = mar_skel_dict['anim_dict']
    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t
    Character.updatePoseAndMeshes(mar_skel_dict, mar_skel_mesh, mar_geom_mesh,
                                  mar_anim_dict['dofData'][fi])

    from PIL import Image
    #orn_geom_mesh.setImage((md['vbuffer'],(md['vheight'],md['vwidth'],3)))
    #orn_geom_mesh.refreshImage()

    w, h = 1024, 1024
    cam = view.cameras[0]
    cam.refreshImageData(view)
    aspect = float(max(1, cam.bindImage.width())) / float(
        cam.bindImage.height()) if cam.bindImage is not None else 1.0
    orn_mapper.project(orn_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, orn_mapper.render, cam.bindId)
    orn_geom_mesh.setImage(data)
    mar_mapper.project(mar_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, mar_mapper.render, cam.bindId)
    mar_geom_mesh.setImage(data)
    #image = Image.fromstring(mode='RGB', size=(w, h), data=data)
    #image = image.transpose(Image.FLIP_TOP_BOTTOM)
    #image.save('screenshot.png')

    if 0:
        global g_screen
        image = Opengl.renderGL(1920, 1080, Opengl.quad_render,
                                (cam.bindId, g_screen))
        import pylab as pl
        pl.imshow(image)
        pl.show()
    view.updateGL()
Example 10
def load_skels(directory):
    ted_dir = os.path.join(os.environ['GRIP_DATA'], 'ted')
    _, ted_skel_dict = IO.load(os.path.join(ted_dir, 'ted_body6.skel'))

    asf_filename = os.path.join(directory, 'Orn.asf')
    amc_filename = os.path.join(directory, 'Orn.amc')
    asf_dict = ASFReader.read_ASF(asf_filename)
    orn_anim_dict = ASFReader.read_AMC(amc_filename, asf_dict)
    orn_skel_dict = ASFReader.asfDict_to_skelDict(asf_dict)
    orn_skel_dict['anim_dict'] = orn_anim_dict

    # we are going to try to transfer the geometry from ted to orn
    # the challenge here is that the joints/bones are not the same
    # because the rigging is rather different, our best chance is to pose the characters and transfer using joint names

    # orn's joint names begin 'VSS_'
    orn = [t[4:] for t in orn_skel_dict['jointNames']]
    orn[0] = 'root'
    # ted's joint names begin 'GenTed:'. the first joint is 'Reference' (on the floor) and the second 'Hips'.
    ted = [t[7:] for t in ted_skel_dict['jointNames']]
    ted[0] = 'root'
    ted[1] = 'root'
    #ted_skel_dict['Ls'][0][:3,:3] = 1.03 * np.eye(3,3) # apparently, ted is 3% smaller than orn?

    # ted has extra joints compared to orn
    ted_extras = ['RightUpLeg_Roll','LeftUpLeg_Roll','RightArm_Roll','LeftArm_Roll','Neck1','Neck2',\
       'LeftHand1','LeftInHandIndex','LeftInHandMiddle','LeftInHandRing','LeftInHandPinky',\
       'RightHand1','RightInHandIndex','RightInHandMiddle','RightInHandRing','RightInHandPinky',\
       'HeadEnd','LeftToeBaseEnd','RightToeBaseEnd',\
       'LeftHandThumb4','LeftHandIndex4','LeftHandMiddle4','LeftHandRing4','LeftHandPinky4',\
       'RightHandThumb4','RightHandIndex4','RightHandMiddle4','RightHandRing4','RightHandPinky4'
     ]
    ted_extras = sorted([ted.index(n) for n in ted_extras])
    # map ted's extra joints to their parent
    for ji in ted_extras:
        jpi = ted_skel_dict['jointParents'][ji]
        ted[ji] = ted[jpi]
    # some of ted's names differ
    name_mapping = dict([(ot, orn_skel_dict['jointNames'][orn.index(
        t.replace('Spine3', 'Chest').replace('_Roll', 'Roll'))])
                         for t, ot in zip(ted, ted_skel_dict['jointNames'])])
    print zip(ted_skel_dict['jointNames'],
              [name_mapping[t] for t in ted_skel_dict['jointNames']])
    print list(enumerate(ted_skel_dict['jointNames']))
    print list(enumerate(orn_skel_dict['jointNames']))

    orn_indices = np.array([orn_skel_dict['jointIndex'][name_mapping[t]]
                            for t in ted_skel_dict['jointNames']], dtype=np.int32)

    # solve ted into orn's position.
    # we generate one constraint per joint and zero the weights of those that aren't constrained
    numJoints = ted_skel_dict['numJoints']
    markerParents = np.arange(numJoints, dtype=np.int32)
    markerOffsets = np.zeros((numJoints, 3), dtype=np.float32)
    markerWeights = np.ones(numJoints, dtype=np.float32)
    once = set(orn_indices)
    for mi, oi in enumerate(orn_indices):
        if oi in once: once.remove(oi)
        else:
            markerWeights[mi] = 0
            print 'weighting zero', mi, ted_skel_dict['jointNames'][mi]
    # don't fit the shoulders, to avoid ted's head leaning back
    markerWeights[ted_skel_dict['jointIndex']['GenTed:LeftShoulder']] = 0
    markerWeights[ted_skel_dict['jointIndex']['GenTed:RightShoulder']] = 0
    markerWeights[0] = 0
    markerWeights[1] = 1
    p_o_w = markerParents, markerOffsets, markerWeights
    effectorData = SolveIK.make_effectorData(ted_skel_dict, p_o_w=p_o_w)
    effectorTargets = np.zeros_like(ted_skel_dict['Gs'])
    effectorTargets[:, :, 3] = orn_skel_dict['Gs'][:, :, 3][orn_indices]

    jps = ted_skel_dict['jointParents']
    cvs, jcss = ted_skel_dict['chanValues'], ted_skel_dict['jointChanSplits']

    def kill_joint(ji):
        cvs[jcss[2 * ji]:jcss[2 * ji + 2]] = 0
        #if jcss[2*ji] == jcss[2*ji+2]: kill_joint(jps[ji])

    for it in range(20):
        SolveIK.solveIK(ted_skel_dict,
                        ted_skel_dict['chanValues'],
                        effectorData,
                        effectorTargets,
                        outerIts=4)
        print it, SolveIK.scoreIK(ted_skel_dict, ted_skel_dict['chanValues'],
                                  effectorData, effectorTargets)
        # zero all joints that are only in ted
        for ji in ted_extras:
            kill_joint(ji)
        # for some reason, the Head and Hands wander off: keep them straight
        nji = ted_skel_dict['jointIndex']['GenTed:Neck']
        cvs[jcss[2 * nji]:jcss[2 * nji + 2]] *= [0, 0, 1]  # zero first two channels only...
        #kill_joint(nji)
        hji = ted_skel_dict['jointIndex']['GenTed:Head']
        cvs[jcss[2 * hji]:jcss[2 * hji + 2]] = -cvs[jcss[2 * nji]:jcss[2 * nji + 2]]
        #kill_joint(hji)
        for jn, ji in ted_skel_dict['jointIndex'].iteritems():
            if 'Hand' in jn: kill_joint(ji)
        print it, SolveIK.scoreIK(ted_skel_dict, ted_skel_dict['chanValues'],
                                  effectorData, effectorTargets)
    # kill all end effectors' parents
    #for ji in xrange(len(jps)):
    #	if ji not in list(jps): kill_joint(jps[ji])
    print SolveIK.scoreIK(ted_skel_dict, ted_skel_dict['chanValues'],
                          effectorData, effectorTargets)
    orn_skel_dict['geom_dict'] = ted_skel_dict['geom_dict']
    orn_skel_dict['geom_Vs'] = ted_skel_dict['geom_Vs'].copy()
    orn_skel_dict['geom_vsplits'] = ted_skel_dict['geom_vsplits'].copy()
    orn_skel_dict['geom_Gs'] = ted_skel_dict['geom_Gs'].copy()
    orn_skel_dict['shape_weights'] = Character.shape_weights_mapping(
        ted_skel_dict, orn_skel_dict, name_mapping)
    return ted_skel_dict, orn_skel_dict
Example 11
def solveIK1Ray(skelDict,
                effectorData,
                x3ds,
                effectorIndices_3d,
                E,
                effectorIndices_2d,
                outerIts=10,
                rootMat=None):
    """
	solveIK routine from Label.py - has the single-ray constraint equations enabled.

	Given effectors (joint, offset, weight) and constraints for those (3d and 2d), solve for the skeleton pose.
	Effector offsets, weights and targets are 3-vectors
		
	Args:
		skelDict (GskelDict): The Skeleton to process
		effectorData (big o'l structure!):
			effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits
		x3ds (float[][3]): 3D Reconstructions
		effectorIndices_3d (int[]): for each row of x3ds, the index of the effector it constrains
		E (): Equations for 1-Ray constraints, or MDMA.
		effectorIndices_2d (int[]): for each single-ray equation in E, the index of the effector it constrains
		outerIts (int): IK Iterations to solve the skeleton. Default = 10
		rootMat (float[3][4]): reference frame of the Skeleton. Default = None
		
	Returns:
		None: The result is an update of the skelDict to the solution - chanValues, channelMats, and Gs.
		
	Requires:
		Character.pose_skeleton_with_chan_mats
		ISCV.derror_dchannel_single_ray
		ISCV.JTJ_single_ray
	"""
    if rootMat is None: rootMat = np.eye(3, 4, dtype=np.float32)
    effectorJoints, effectorOffsets, effectorWeightsOld, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits = effectorData
    chanValues = skelDict['chanValues']
    jointParents = skelDict['jointParents']
    Gs = skelDict['Gs']
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    numChannels = jointChanSplits[-1]
    numEffectors = len(effectorJoints)
    num3ds = len(effectorIndices_3d)
    num2ds = len(effectorIndices_2d)
    effectorOffsets = np.copy(effectorOffsets[:, :, 3])
    effectorWeights = np.zeros(numEffectors, dtype=np.float32)
    effectorWeights[effectorIndices_3d] = 1  # TODO Why does this fail? effectorWeightsOld[effectorIndices_3d,0,3]
    effectorWeights[effectorIndices_2d] = 1  # effectorWeightsOld[effectorIndices_2d,0,3]
    numUsedChannels = len(usedChannels)
    channelMats = np.zeros((numChannels, 3, 4), dtype=np.float32)
    effectors = np.zeros((numEffectors, 3), dtype=np.float32)
    residual = np.zeros((num3ds, 3), dtype=np.float32)
    residual2 = np.zeros((num2ds, 2), dtype=np.float32)
    derrors = np.zeros((numUsedChannels, numEffectors, 3), dtype=np.float32)
    delta = np.zeros((numUsedChannels), dtype=np.float32)
    JTJ = np.zeros((numUsedChannels, numUsedChannels), dtype=np.float32)
    JTB = np.zeros((numUsedChannels), dtype=np.float32)
    JT = derrors.reshape(numUsedChannels, -1)
    JTJdiag = np.diag_indices_from(JTJ)
    for it in xrange(outerIts):
        # TODO, only usedChannels are changing, only update the matrices that have changed after the first iteration.
        # updates the channelMats and Gs
        Character.pose_skeleton_with_chan_mats(channelMats, Gs, skelDict,
                                               chanValues, rootMat)
        bestScore = ISCV.pose_effectors_single_ray(
            effectors, residual, residual2, Gs, effectorJoints,
            effectorOffsets, effectorWeights, x3ds, effectorIndices_3d, E,
            effectorIndices_2d)
        if np.sum(residual * residual) + np.sum(
                residual2 * residual2) <= 1e-5 * (num3ds + num2ds):
            break  # early termination
        ISCV.derror_dchannel_single_ray(derrors, channelMats, usedChannels,
                                        usedChannelWeights, usedCAEs,
                                        usedCAEsSplits, jointChans, effectors,
                                        effectorWeights)
        # J = d_effectors/dc
        # err(c) = (x3ds - effectors[effectorIndices_3d],
        #           e0 + E effectors[effectorIndices_2d])
        # err(c+delta) = (x3ds - effectors[effectorIndices_3d] - J[effectorIndices_3d] delta,
        #                 e0 + E effectors[effectorIndices_2d] + E J[effectorIndices_2d] delta) = 0
        # so J dc = B, i.e. (J[effectorIndices_3d] ; E J[effectorIndices_2d]) dc = (B ; e0)
        # DLS method : solve (JTJ + k^2 I) delta = JTB
        ISCV.JTJ_single_ray(
            JTJ, JTB, JT, residual, effectorIndices_3d, E, effectorIndices_2d,
            residual2)  #np.dot(JT, B, out=JTB); np.dot(JT, JT.T, out=JTJ)
        JTJ[JTJdiag] += 1
        JTJ[JTJdiag] *= 1.1
        # delta[:] = np.linalg.solve(JTJ, JTB)
        _, delta[:], _ = LAPACK.dposv(JTJ, JTB)  # Use Positive Definite Solver
        chanValues[usedChannels] += delta
        # TODO: add channel limits

        # # J_transpose method, 3d only: scaling problems with translation
        #JT = derrors[:,effectorIndices_3d,:].reshape(numUsedChannels,-1)
        #np.dot(JT, B, out=delta)
        #np.dot(JT.T,delta,out=JJTB)
        #delta *= np.dot(B,JJTB)/(np.dot(JJTB,JJTB)+1)
        #delta[:3] *= 100000.
        #testScale = ISCV.Jtranspose_SR(delta, JJTB, JT, residual,effectorIndices_3d,residual2,effectorIndices_2d)
    Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
Example 12
def solveIK(skelDict,
            chanValues,
            effectorData,
            effectorTargets,
            outerIts=10,
            rootMat=None):
    """
	Given an initial skeleton pose (chanValues) and effectors (i.e. constraints: joint, offset, weight, target), solve for the skeleton pose.
	Effector weights and targets are 3x4 matrices.
		* Setting 1 in the weight's 4th column makes a position constraint.
		* Setting 100 in the weight's first 3 columns makes an orientation constraint.
		
	Args:
		skelDict (GskelDict): The Skeleton to process
		chanValues (float[]): Initial pose of the skeleton as Translation and many rotations applied to joints in the skelDict.
		effectorData (big o'l structure!):
			effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits
		effectorTargets (float[][3][4]): target 3x4 matrices for the effectors (position in the 4th column)
		outerIts (int): IK Iterations to solve the skeleton. Default = 10
		rootMat (float[3][4]): reference frame of the Skeleton. Default = None
		
	Returns:
		None: The result is an update of the skelDict to the solution - chanValues, channelMats, and Gs.
		
	Requires:
		Character.pose_skeleton_with_chan_mats
		ISCV.pose_effectors
		ISCV.derror_dchannel
		ISCV.JTJ
	"""
    effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedChannelWeights, usedCAEs, usedCAEsSplits = effectorData
    jointParents = skelDict['jointParents']
    Gs = skelDict['Gs']
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    numChannels = jointChanSplits[-1]
    numEffectors = len(effectorJoints)
    numUsedChannels = len(usedChannels)
    channelMats = np.zeros((numChannels, 3, 4), dtype=np.float32)
    #usedEffectors   = np.array(np.where(np.sum(effectorWeights,axis=(1,2)) != 0)[0], dtype=np.int32)
    usedEffectors = np.array(np.where(effectorWeights.reshape(-1) != 0)[0],
                             dtype=np.int32)
    # numUsedEffectors= len(usedEffectors)
    effectors = np.zeros((numEffectors, 3, 4), dtype=np.float32)
    residual = np.zeros((numEffectors, 3, 4), dtype=np.float32)
    derrors = np.zeros((numUsedChannels, numEffectors, 3, 4), dtype=np.float32)
    # steps           = np.ones((numUsedChannels),dtype=np.float32)*0.2
    # steps[np.where(jointChans[usedChannels] < 3)[0]] = 30.
    # steps = 1.0/steps
    delta = np.zeros((numUsedChannels), dtype=np.float32)
    # JJTB            = np.zeros((numEffectors*12),dtype=np.float32)
    JTJ = np.zeros((numUsedChannels, numUsedChannels), dtype=np.float32)
    JTB = np.zeros((numUsedChannels), dtype=np.float32)
    JT = derrors.reshape(numUsedChannels, -1)
    JTJdiag = np.diag_indices_from(JTJ)
    B = residual.reshape(-1)
    # TODO, calculate the exact requirements on the tolerance
    B_len = len(B)
    tolerance = 0.00001
    it_eps = (B_len**0.5) * tolerance
    for it in xrange(outerIts):
        # TODO, only usedChannels are changing, only update the matrices that have changed after the first iteration.
        # TODO Look into damping, possibly clip residuals?
        # updates the channelMats and Gs
        Character.pose_skeleton_with_chan_mats(channelMats, Gs, skelDict,
                                               chanValues, rootMat)
        bestScore = ISCV.pose_effectors(effectors, residual, Gs,
                                        effectorJoints, effectorOffsets,
                                        effectorWeights, effectorTargets)
        if np.linalg.norm(B) < it_eps: break  # early termination
        ISCV.derror_dchannel(derrors, channelMats, usedChannels,
                             usedChannelWeights, usedCAEs, usedCAEsSplits,
                             jointChans, effectors, effectorWeights)
        # if True: # DLS method : solve (JTJ + k^2 I) delta = JTB
        ISCV.JTJ(
            JTJ, JTB, JT, B,
            usedEffectors)  #np.dot(JT, B, out=JTB); np.dot(JT, JT.T, out=JTJ)
        JTJ[JTJdiag] += 1
        JTJ[JTJdiag] *= 1.1
        _, delta[:], _ = LAPACK.dposv(JTJ, JTB)  # Use Positive Definite Solver
        # Use General Solver
        # delta[:] = np.linalg.solve(JTJ, JTB)
        # elif it==0: # SVD method: solve J delta = B
        # 	delta[:] = np.linalg.lstsq(JT.T[usedEffectors], B[usedEffectors], rcond=0.0001)[0].reshape(-1)
        # else:     # J transpose method
        # 	testScale = ISCV.J_transpose(delta, JJTB, JT, B)
        # 	#np.dot(JT, B, out=delta); np.dot(JT.T,delta,out=JJTB); delta *= np.dot(B,JJTB)/(np.dot(JJTB,JJTB)+1.0)
        #scale = np.max(np.abs(delta*steps))
        #if scale > 1.0: delta *= 1.0/scale
        #np.clip(delta,-steps,steps,out=delta)
        chanValues[usedChannels] += delta
        # TODO: add channel limits
        #bestScore = ISCV.lineSearch(chanValues, usedChannels, delta, Gs, Ls, jointParents, jointChans, jointChanSplits,
        #							rootMat, effectorJoints, effectorOffsets, effectorWeights, effectorTargets, innerIts, bestScore)
    #print np.mean(B*B)
    Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
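The damped least-squares update above can be illustrated with plain NumPy, independent of ISCV and LAPACK (np.linalg.solve stands in for LAPACK.dposv; JT and B are toy values shaped like the transposed Jacobian and the flattened residual):

import numpy as np

def dls_step(JT, B):
    '''One damped least-squares step: solve (JT.JT^T + damping) delta = JT.B.'''
    JTJ = np.dot(JT, JT.T)
    JTB = np.dot(JT, B)
    diag = np.diag_indices_from(JTJ)
    JTJ[diag] += 1    # additive damping, as in solveIK above
    JTJ[diag] *= 1.1  # multiplicative damping of the diagonal
    return np.linalg.solve(JTJ, JTB)

JT = np.random.rand(6, 36).astype(np.float32)  # d(effector error)/d(channel) for 6 channels
B = np.random.rand(36).astype(np.float32)      # flattened residual
delta = dls_step(JT, B)                        # per-channel update, shape (6,)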
Example 13
def animateHead(newFrame):
    global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames, extract
    global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, c3d_points
    global md, movies
    tony_geom.image, tony_geom.bindImage, tony_geom.bindId = ted_geom.image, ted_geom.bindImage, ted_geom.bindId  # reuse the texture!
    fo = 55
    MovieReader.readFrame(md, seekFrame=((newFrame + fo) / 2))
    view = QApp.view()
    for ci in range(0, 4):
        view.cameras[ci + 1].invalidateImageData()
    ci = view.cameras.index(view.camera) - 1
    if ci >= 0:
        MovieReader.readFrame(movies[ci], seekFrame=(newFrame + fo))  # only update the visible camera
    frac = (newFrame % 200) / 100.
    if (frac > 1.0): frac = 2.0 - frac
    fi = newFrame % len(c3d_frames)

    if ted_skel:  # move the skeleton
        dofs = ted_anim['dofData'][fi * 2 - 120]
        Character.pose_skeleton(ted_skel['Gs'], ted_skel, dofs)
        ted_glskel.setPose(ted_skel['Gs'])
        offset = ted_skel['Gs'][13]  # ted_skel['jointNames'].index('VSS_Head')

        cams = QApp.app.getLayers()['cameras']
        tmp = np.eye(4, 4, dtype=np.float32)
        tmp[:3, :] = offset
        cams.setTransform(tmp)

        if ci >= 0:  # move the camera view to be correct
            camRT = mats[ci][1]
            RT = np.dot(camRT, np.linalg.inv(tmp))
            view.cameras[ci + 1].setRT(RT)

        # update the face geometries to fit the skeleton
        ted_geom.setPose(offset.reshape(1, 3, 4))
        tony_geom.setPose(offset.reshape(1, 3, 4))
        #TODO head_points,c3d_points,surface_points,ted_geom2

    frame = c3d_frames[fi][extract]
    which = np.where(frame[:, 3] == 0)[0]
    x3ds = frame[which, :3]
    #print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
    bnds = np.array([[0, 1]] * ted_lo_mat.shape[0], dtype=np.float32)
    tony_shape_vector[:] = OBJReader.fitLoResShapeMat(ted_lo_rest,
                                                      ted_lo_mat,
                                                      x3ds,
                                                      Aoffset=10.0,
                                                      Boffset=3.0,
                                                      x_0=tony_shape_vector,
                                                      indices=which,
                                                      bounds=bnds)
    #global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]

    #tony_shape_vector *= 0.
    #tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
    if 1:
        ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1, 3)
    else:
        ted_shape_v = np.zeros_like(ted_obj['v'])
        ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
    tony_shape_v = ted_shape_v
    #tony_shape_v = tony_shape['v']*frac
    ted_geom.setVs(ted_obj['v'] + ted_shape_v)  #ted_shape['v'] * frac)
    tony_geom.setVs(tony_obj['v'] + tony_shape_v -
                    np.array([200, 0, 0], dtype=np.float32))
    ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) +
                    tony_tedtopo_obj['v'] * frac +
                    np.array([200, 0, 0], dtype=np.float32))
    #if len(ted_shape_v) == len(tony_shape_v):
    #	tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
    #	diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])

    #print [c3d_labels[i] for i in which]
    surface_points.vertices = np.dot(ted_lo_mat.T,
                                     tony_shape_vector).T + ted_lo_rest
    surface_points.colour = [0, 1, 0, 1]  # green
    c3d_points.vertices = x3ds
    c3d_points.colour = [1, 0, 0, 1]  # red

    QApp.app.refreshImageData()
    QApp.app.updateGL()
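The blendshape update above is a plain linear model: deformed vertices = rest vertices + shape_mat_T . weights, reshaped to (N, 3). A standalone toy illustration (random basis and weights, not the actual ted/tony data):

import numpy as np

num_verts, num_shapes = 4, 2
rest = np.zeros((num_verts, 3), dtype=np.float32)                            # toy rest shape
shape_mat_T = np.random.rand(num_verts * 3, num_shapes).astype(np.float32)   # toy shape basis
weights = np.array([0.5, 0.25], dtype=np.float32)                            # shape vector
deformed = rest + np.dot(shape_mat_T, weights).reshape(-1, 3)                # same form as ted_shape_v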