Example #1
def get_movie_frame(md, frame, deinterlacing):
    '''Read a MovieReader frame and return it as an RGB image array, deinterlacing in place if requested.'''
    if deinterlacing:
        field = frame & 1
        frame //= 2  # two fields are stored per frame
    try:
        MovieReader.readFrame(md, seekFrame=frame)
    except Exception:
        print('oops', frame)
        return None
    img = np.frombuffer(md['vbuffer'],
                        dtype=np.uint8).reshape(md['vheight'], md['vwidth'], 3)

    if deinterlacing:  # TODO check even/odd
        y = np.arange(0, md['vheight'], 2)
        if field: img[y, :] = img[y + 1, :]  # odd
        else: img[y + 1, :] = img[y, :]  # even
    return img
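
A minimal usage sketch for this helper, assuming the MovieReader.open_file entry point shown in Example #3 and a hypothetical clip path:

import MovieReader

md = MovieReader.open_file('clip.mov', audio=False)  # hypothetical path
img = get_movie_frame(md, 100, deinterlacing=False)
if img is not None:
    print(img.shape)  # (vheight, vwidth, 3)
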
Example #2
def setFrame(newFrame):
    global frame, view, allSkels, points, joints, bones
    frame = newFrame
    for Gs3, Ls3, skelDict3, animData3, skel3 in allSkels:
        dofs3 = animData3[frame % len(animData3)]
        Gs3 = ASFReader.pose_skeleton(Gs3, Ls3, skelDict3['jointParents'],
                                      skelDict3['jointDofs'],
                                      skelDict3['dofSplits'], dofs3)
        skel3.vertices[:] = Gs3[:, :, 3]

    global md, img, g_detectingDots, g_readingMovie
    if g_readingMovie and md is not None:
        try:
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) // 4)
        except Exception:
            frame = videoFrameOffset
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) // 4)
        if g_detectingDots:
            ret = ISCV.detect_bright_dots(img, 254, 200, 190)
            good = [r for r in ret
                    if 0.1 < min(r.sxx, r.syy) < 100.0]  # and r.sxy*r.sxy <= 0.01*r.sxx*r.syy
            print(len(good), 'good points')
            for r in good:
                #print r.sx,r.sy,r.sxx,r.sxy,r.syy
                img[int(r.sy - 5):int(r.sy + 5),
                    int(r.sx - 5):int(r.sx + 5), :] = [0, 255, 0]
        view.refreshImageData()
    global animJoints, stablePointsGroups, displayFrames, groupRepresentatives
    pfr = np.searchsorted(goodFrames, frame)
    points.vertices = displayFrames[pfr % len(displayFrames)]
    if animJoints is not None:
        joints.vertices[:] = animJoints[pfr % len(animJoints)]
    bones.vertices[::2] = joints.vertices
    bones.vertices[1::2] = points.vertices[
        groupRepresentatives[stablePointsGroups]]

    view.updateGL()
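
The pfr lookup above maps an arbitrary timeline frame onto the nearest stored reconstruction via np.searchsorted; a self-contained illustration with made-up data:

import numpy as np

goodFrames = np.array([10, 20, 40, 80])    # frames that were actually reconstructed (made up)
displayFrames = np.random.rand(4, 100, 3)  # one point cloud per good frame
pfr = np.searchsorted(goodFrames, 33)      # index of the first good frame >= 33, i.e. 2
points = displayFrames[pfr % len(displayFrames)]
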
Example #3
def import_movie_frames():
    movie_fn, _ = QApp.app.loadFilename(
        'Choose a movie to open', cwd(),
        'Movie Files (*.mp4 *.mov *.avi *.flv *.mpg)')
    if movie_fn == '': return  # cancel
    set_cwd(movie_fn)
    txt_fn, _ = QApp.app.loadFilename(
        'Choose a text file of frame indices to open', cwd(),
        'Text Files (*.txt)')
    md = MovieReader.open_file(movie_fn, audio=False)
    images, shapes = [], []
    if txt_fn == '':
        frames = range(0, md['vmaxframe'], 100)
        #if txt_fn == '': frames = range(30000, 38300, 100)
    else:
        with open(txt_fn, 'r') as f:
            frames = [int(line.split(':')[1]) for line in f]
    for fi in frames:
        print(fi, '/', frames[-1])
        MovieReader.readFrame(md, fi)
        add_image(
            np.frombuffer(md['vbuffer'],
                          dtype=np.uint8).reshape(md['vheight'], md['vwidth'],
                                                  3).copy())
    State.push('Import movie frames')
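
The frame-index file is assumed to hold one label:frame pair per line, as implied by the split(':') parse above; a tiny sketch of that format:

lines = ['pose_smile:120', 'pose_frown:340']          # assumed file contents
frames = [int(line.split(':')[1]) for line in lines]  # -> [120, 340]
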
Example #4
def set_frame_cb2(frame):
	global g_predictor, g_predictor_dlib, g_detector
	size = (len(g_predictor['ref_shape'])+4)
	geo_vs = np.zeros((size,3), dtype=np.float32)
	ref_vs = np.zeros((size,3), dtype=np.float32)

	global g_prev_vs
	try: g_prev_vs
	except NameError: g_prev_vs = None
	if 0: # show_images
		global g_jpgs; fn = g_jpgs[frame%len(g_jpgs)]
		img = Face.load_image(fn)
		img = Face.fix_image(img, max_size=640)
		use_prev_vs = False # images need booting every frame
	else: # show_movies
		global md; MovieReader.readFrame(md, seekFrame=frame) # only update the visible camera
		img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
		use_prev_vs = True
		
	if 0: # undistort_stuff
		global g_screen
		global g_tid, g_bid
		g_tid,g_bid = Opengl.bind_streaming_image(img, g_tid, g_bid)
		img = Opengl.renderGL(img.shape[1], img.shape[0], Opengl.quad_render, (g_tid, g_screen, 0.85))
		#Opengl.unbind_image(bid)

	if 0: # rotated_image
		img = img.transpose((1,0,2)).copy()
	if 0: # gamma_image
		lookup = np.array([int(((x/255.0)**0.4545)*255.0) for x in range(256)], dtype=np.uint8)
		img = lookup[img]
	#img[:,600:1000] = 0 #img[:,200:600].copy()
	if 0: # test_rotate
		import scipy; img = scipy.misc.imrotate(img, frame, interp='bilinear')
	if 0: # test_rotate_right
		import scipy; img[:,-img.shape[0]:] = scipy.misc.imrotate(img[:,-img.shape[0]:], frame, interp='bilinear')
	if 0: # test_filter_image
		img = ISCV.filter_image(img,4,16)

	w,h = img.shape[1]*0.5,img.shape[0]*0.5

	boot = g_prev_vs
	if boot is None: boot = Face.detect_face(img, g_predictor, 2) # ,-1) # put -1 at end to boot at any angle
	tmp = Face.track_face(img, g_predictor, boot)
	if use_prev_vs and boot is not None: g_prev_vs = tmp
	if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
	global template_vs
	geo_vs[:size-4,:2] = tmp
	geo_vs[size-4:size,:2] = Face.get_boundary(geo_vs[:size-4,:2], template_vs)

	if 0: # show_aam
		global g_aam_model
		shape_u, tex_u, A_inv, mn  = Face.fit_aam(g_aam_model, tmp, img)
		Face.render_aam(g_aam_model, A_inv*0.1, mn*0.1, shape_u, tex_u, img)
		su,tu = Face.normalized_aam_coords(g_aam_model, shape_u, tex_u)
		res = Face.aam_residual(g_aam_model, tmp, img)
		QApp.view().displayText = [(10,100,'%f' % np.linalg.norm(tu)),(10,125,'%f' % np.linalg.norm(su)),(10,150,'%f'%res)]

	if 0: # show_extracted_texture
		global g_aam_model_indices,g_aam_model_weights
		pixels = Face.extract_texture(img, geo_vs[:size,:2], g_aam_model_indices, g_aam_model_weights)
		global template_vs
		Face.render_texture(pixels, img, template_vs, g_aam_model_indices, g_aam_model_weights)

	geo_mesh = QApp.app.getLayer('geo_mesh')
	geo_mesh.setVs(geo_vs)
	geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[-w,1000-h,0.1]]
	image_mesh = QApp.app.getLayer('image_mesh')
	image_mesh.setVs(np.array([[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]], dtype=np.float32))
	image_mesh.setImage(img)
	QApp.view().updateGL()
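
The gamma_image branch above applies a gamma curve by building a 256-entry table and fancy-indexing the image with it; the same trick in isolation:

import numpy as np

# 0.4545 ~ 1/2.2, an sRGB-like encoding gamma
lookup = np.array([int(((x / 255.0) ** 0.4545) * 255.0) for x in range(256)], dtype=np.uint8)
img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # made-up image
img = lookup[img]  # the uint8 pixels index the table, applying the curve per channel
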
Example #5
def setFrame(frame):
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands, dot_detections, track3d, prev_frame, booting, trackGraph
    key = State.getKey('dotParams/attrs')

    skipping = (frame != prev_frame and frame - 1 != prev_frame)
    prev_frame = frame
    booting = 10 if skipping else booting - 1

    p0, p1 = [], []

    if True:  #dot_detections is None:

        for pair in enumerate(movies):
            pts = process_frame(deinterlacing, detectingWands, frame, key,
                                pair)
            p0.append(pts[0])
            p1.append(pts[1])

        def make_bounds(lens):
            return np.array([sum(lens[:x]) for x in range(len(lens) + 1)],
                            dtype=np.int32)

        data0 = (np.array(np.concatenate(p0), dtype=np.float32).reshape(-1, 2),
                 make_bounds([len(x) for x in p0]))
        data1 = (np.array(np.concatenate(p1), dtype=np.float32).reshape(-1, 2),
                 make_bounds([len(x) for x in p1]))
    else:
        #dot_detections = movies_to_detections(movies, [frame], deinterlacing, key)
        data0, data1 = (dot_detections[frame] if frame in dot_detections
                        else next(iter(dot_detections.values())))
        for ci, md in enumerate(movies):
            try:
                MovieReader.readFrame(md, seekFrame=frame)
            except Exception:
                print('oops', frame)
                return None, None
            #img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
            QApp.view().cameras[ci + 1].invalidateImageData()
            data0 = data0[0].copy(), data0[1]  # so that undistort doesn't modify the raw detections
            data1 = data1[0].copy(), data1[1]
    # TODO, move this to the viewer...
    data0 = ViconReader.frameCentroidsToDets(data0, mats)
    data1 = ViconReader.frameCentroidsToDets(data1, mats)

    primitives2D[0].setData(data0[0], data0[1])
    primitives2D[1].setData(data1[0], data1[1])

    #print x2ds_labels
    if len(movieFilenames) != 1:
        if 1:
            #x2ds_data, x2ds_splits = data0 # dark points only
            x2ds_data, x2ds_splits = data1  # light points only
            if skipping:
                x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
                #trackGraph = Label.TrackGraph()
            else:
                x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
                # coarse bounding box
                if False:
                    for xi, x in zip(x3ds_labels, x3ds):
                        if (x[0] < -200 or x[0] > 200 or x[1] < 800 or x[1] > 1200
                                or x[2] < -50 or x[2] > 300):
                            track3d.x2ds_labels[np.where(track3d.x2ds_labels == xi)[0]] = -1
                            x[:] = 0
            primitives[0].setData(x3ds)
            #trackGraph.push(x3ds,x3ds_labels)
            #primitives[0].graph = trackGraph.drawing_graph()
        elif False:
            Ps = np.array([m[2] / (m[0][0, 0]) for m in mats],
                          dtype=np.float32)
            data = data0  # dark points
            #data = data1 # light points
            x3ds, x2ds_labels = Recon.intersect_rays(data[0],
                                                     data[1],
                                                     Ps,
                                                     mats,
                                                     tilt_threshold=0.003,
                                                     x2d_threshold=0.02,
                                                     x3d_threshold=5.0,
                                                     min_rays=2)
            primitives[0].setData(x3ds)
        if detectingTiara:
            global c3d_frames
            c3d_frame = c3d_frames[(frame - 55) % len(c3d_frames)]
            which = np.where(c3d_frame[:, 3] == 0)[0]
            x3ds = c3d_frame[which, :3]
            #print c3d_frame,'len',len(x3ds)
            primitives[1].setData(x3ds)
    QApp.app.refreshImageData()
    QApp.app.updateGL()
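
make_bounds above turns per-camera point counts into cumulative offsets, so camera ci's detections live at data[splits[ci]:splits[ci + 1]]; a small illustration with made-up counts:

import numpy as np

def make_bounds(lens):
    return np.array([sum(lens[:x]) for x in range(len(lens) + 1)], dtype=np.int32)

splits = make_bounds([3, 0, 2])  # -> [0, 3, 3, 5]
pts = np.zeros((5, 2), dtype=np.float32)
cam1 = pts[splits[1]:splits[2]]  # empty slice for the camera that saw nothing
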
Example #6
def intersectRaysCB(fi):
    global x2d_frames, mats, Ps, c3d_frames, view, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, g_all_skels, md, orn_mapper, mar_mapper
    skipping = prev_frame is None or np.abs(fi - prev_frame) > 10
    prev_frame = fi
    view = QApp.view()
    points, altpoints = primitives
    g2d = primitives2D[0]
    frame = x2d_frames[fi]
    x2ds_data, x2ds_splits = ViconReader.frameCentroidsToDets(frame, mats)
    g2d.setData(x2ds_data, x2ds_splits)
    if skipping:
        x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
        #trackGraph = Label.TrackGraph()
        boot = -10
    else:
        x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
    if False:
        boot = boot + 1
        if boot == 0:
            x2d_threshold_hash = 0.01
            penalty = 10.0  # the penalty for unlabelled points. this number should be about 10. to force more complete labellings, set it higher.
            maxHyps = 500  # the number of hypotheses to maintain.
            print("booting:")
            numLabels = len(orn_graph[0])
            l2x = -np.ones(numLabels, dtype=np.int32)
            label_score = ISCV.label_from_graph(x3ds, orn_graph[0],
                                                orn_graph[1], orn_graph[2],
                                                orn_graph[3], maxHyps, penalty,
                                                l2x)
            clouds = ISCV.HashCloud2DList(x2ds_data, x2ds_splits,
                                          x2d_threshold_hash)
            which = np.array(np.where(l2x != -1)[0], dtype=np.int32)
            pras_score, x2d_labels, vels = Label.project_assign(
                clouds,
                x3ds[l2x[which]],
                which,
                Ps,
                x2d_threshold=x2d_threshold_hash)
            print(fi, label_score, pras_score)
            labelled_x3ds = x3ds[l2x[which]]
            print(track_orn.bootPose(x2ds_data, x2ds_splits, x2d_labels))
        if boot > 0:
            track_orn.push(x2ds_data, x2ds_splits, its=4)
    #x3ds,x2ds_labels = Recon.intersect_rays(x2ds_data, x2ds_splits, Ps, mats, seed_x3ds = None)
    points.setData(x3ds)
    if c3d_frames is not None:
        c3ds = c3d_frames[(fi - 832) // 2]
        true_labels = np.array(np.where(c3ds[:, 3] == 0)[0], dtype=np.int32)
        x3ds_true = c3ds[true_labels, :3]
        altpoints.setData(x3ds_true)

    ci = view.cameraIndex() - 1
    if True:  #ci == -1:
        MovieReader.readFrame(md, seekFrame=max((fi - 14) // 4, 0))
        QApp.app.refreshImageData()
    (orn_skel_dict, orn_t) = g_all_skels['orn']
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_skel_dict['chanValues'][:] = orn_anim_dict['dofData'][fi]
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)
    (mar_skel_dict, mar_t) = g_all_skels['mar']
    mar_anim_dict = mar_skel_dict['anim_dict']
    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t
    Character.updatePoseAndMeshes(mar_skel_dict, mar_skel_mesh, mar_geom_mesh,
                                  mar_anim_dict['dofData'][fi])

    from PIL import Image
    #orn_geom_mesh.setImage((md['vbuffer'],(md['vheight'],md['vwidth'],3)))
    #orn_geom_mesh.refreshImage()

    w, h = 1024, 1024
    cam = view.cameras[0]
    cam.refreshImageData(view)
    aspect = (float(max(1, cam.bindImage.width())) / float(cam.bindImage.height())
              if cam.bindImage is not None else 1.0)
    orn_mapper.project(orn_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, orn_mapper.render, cam.bindId)
    orn_geom_mesh.setImage(data)
    mar_mapper.project(mar_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, mar_mapper.render, cam.bindId)
    mar_geom_mesh.setImage(data)
    #image = Image.fromstring(mode='RGB', size=(w, h), data=data)
    #image = image.transpose(Image.FLIP_TOP_BOTTOM)
    #image.save('screenshot.png')

    if 0:
        global g_screen
        image = Opengl.renderGL(1920, 1080, Opengl.quad_render,
                                (cam.bindId, g_screen))
        import pylab as pl
        pl.imshow(image)
        pl.show()
    view.updateGL()
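
Both setFrame and intersectRaysCB switch between boot and push on whether the timeline jumped; the pattern in isolation, with a hypothetical tracker exposing the same boot/push interface:

prev_frame = None

def on_frame(tracker, fi, data, splits):
    global prev_frame
    skipping = prev_frame is None or abs(fi - prev_frame) > 10
    prev_frame = fi
    if skipping:
        return tracker.boot(data, splits)  # re-initialise after a seek
    return tracker.push(data, splits)      # incremental update from the previous frame
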
Example #7
def update_rbfn(md, short_name='Take', mapping_file=None):
	global g_rbfn, g_predictor
	# TODO these groups must have weights, this can't initialise weights
	groups, slider_splits, slider_names, marker_names = extract_groups(g_rbfn)

	# update the neutral
	if mapping_file:
		fi = mapping_file[next(iter(mapping_file))]['Neutral']
	else:
		g = groups[0][1]
		print(list(g.keys()))
		active_poses = [pn for pn in g['marker_data'].keys() if pn not in g.get('disabled', [])]
		ni = [ap.rsplit('_',2)[1]=='Neutral' for ap in active_poses].index(True)
		fi = int(active_poses[ni].rsplit('_',2)[2])
	print('neutral on frame', fi)
	MovieReader.readFrame(md, fi)
	img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
	vs = Face.detect_face(img, g_predictor)
	vs = Face.track_face(img, g_predictor, vs)
	clear_neutral()
	g_rbfn['neutral'] = stabilize_shape(vs)[0]
	for (gn,group) in groups:
		gmd,gsd,gis = {},{},{}
		for pose_key,pose_data in group['marker_data'].items():
			sd = group['slider_data'][pose_key]
			test_short_name,pose_name,frame_number = pose_key.rsplit('_',2)
			assert(test_short_name == short_name)
			fi = int(frame_number)
			print(fi)
			if mapping_file:
				if pose_name not in mapping_file[gn]:
					print('WARNING: pose %s missing; removing from rbfn' % pose_name)
					continue
				fi = mapping_file[gn].pop(pose_name)
				print('remapping to', fi)
			MovieReader.readFrame(md, fi)
			img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
			vs = Face.detect_face(img, g_predictor)
			if vs is None:
				print('failed to boot')
				for vi in range(max(fi-300,0),fi):
					MovieReader.readFrame(md, vi)
					img2 = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
					vs = Face.detect_face(img2, g_predictor)
					if vs is not None:
						print('booted on frame', vi)
						for vi2 in range(vi+1,fi):
							MovieReader.readFrame(md, vi2)
							img2 = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
							vs = Face.track_face(img2, g_predictor, vs)
						break
					if vi == fi-1: print("don't know what to do")
			vs = Face.track_face(img, g_predictor, vs)
			#Face.show_image(img,vs)
			#vs, head_pan, head_tilt, A = stabilize_shape(vs)
			print(pose_name)
			#tmp = pose_data.reshape(-1,3)[:,:2]
			#Face.show_image(None,tmp-np.mean(tmp,axis=0),(vs-np.mean(vs,axis=0))*5)
			pose_data = np.hstack((vs,np.zeros((vs.shape[0],1),dtype=np.float32)))
			pose_key = '_'.join((short_name,pose_name,str(fi)))
			gmd[pose_key] = pose_data
			gsd[pose_key] = sd
			gis[pose_key] = JPEG.compress(img)
		group['marker_data'] = gmd
		group['slider_data'] = gsd
		group['images'] = gis
	if mapping_file: print('left overs:', mapping_file)
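
Pose keys in this function encode take, pose and frame as short_name_poseName_frame; rsplit('_', 2) recovers the parts even when the take's short name itself contains underscores:

pose_key = 'My_Take_Smile_120'  # made-up key with an underscore in the short name
short_name, pose_name, frame_number = pose_key.rsplit('_', 2)
assert (short_name, pose_name, int(frame_number)) == ('My_Take', 'Smile', 120)
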
Example #8
def track_view_cb(fi, attrs):
	# g_mode = 0
	global g_webcam, g_md, g_rbfn, g_predictor
	# runtime options and state
	global g_prev_smooth_shape, g_prev_vs, g_hmc_boot, g_settle, g_head_pan_tilt_roll

	if attrs['using_webcam']:
		if g_webcam is None:
			g_webcam = WebCam()
			g_webcam.Open(State.getKey('/root/ui/attrs/cam_offset') + State.getKey('/root/ui/attrs/webcam_index'))
			g_webcam.SetProperty('FPS', State.getKey('/root/ui/attrs/cam_fps'))
			g_webcam.SetProperty('FRAME_WIDTH', State.getKey('/root/ui/attrs/cam_width'))
			g_webcam.SetProperty('FRAME_HEIGHT', State.getKey('/root/ui/attrs/cam_height'))
		img = g_webcam.GetFrame()
		if img is None:
			img = np.zeros((16,16,3),dtype=np.uint8)
	elif g_md is not None:
		MovieReader.readFrame(g_md, seekFrame=fi) # only update the visible camera
		img = np.frombuffer(g_md['vbuffer'], dtype=np.uint8).reshape(g_md['vheight'],g_md['vwidth'],3)
		#QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
	else:
		img = np.zeros((16,16,3),dtype=np.uint8)
	
	mirror_scale = -1 if attrs['mirroring'] else 1
	rotate = attrs['rotate']

	if g_settle >= 0:
		if g_settle == 0 and g_prev_vs is not None:
			g_hmc_boot = g_prev_vs.copy()
		g_settle = g_settle - 1
	else:
		if attrs['HMC_mode'] and g_hmc_boot is not None: g_prev_vs = g_hmc_boot.copy()
		if attrs['booting'] or Face.test_reboot(img, g_prev_vs):
			g_prev_vs = Face.detect_face(img, g_predictor, 2, rotate)
			g_hmc_boot = None # in case we didn't detect a face
			g_settle = 10 # go into settle mode (10 frames)
			if g_prev_vs is not None:
				State.setKey('/root/ui/attrs/booting',False)
				if attrs['HMC_mode']: g_hmc_boot = g_prev_vs.copy()
	g_prev_vs = Face.track_face(img, g_predictor, g_prev_vs, rotate=rotate)

	# compensate for roll, translation and scale
	norm_shape, head_pan, head_tilt, A = stabilize_shape(g_prev_vs, setting_neutral=attrs['setting_neutral'])
	# dejitter
	if attrs['filtering']:
		g_prev_smooth_shape = filter_data(norm_shape, g_prev_smooth_shape)
	else:
		g_prev_smooth_shape = norm_shape.copy()
	# extract angles from the measured values
	head_pan_tilt_roll = np.degrees(np.arctan2([head_pan*mirror_scale, head_tilt, -mirror_scale*A[1][0]],[2,2,A[1][1]]))
	g_head_pan_tilt_roll = filter_data(head_pan_tilt_roll, g_head_pan_tilt_roll, 3.0)

	camera = QApp.view().camera
	camera.lockedUpright = False
	camera.cameraRoll = (-90*rotate if rotate != -1 else g_head_pan_tilt_roll[2])

	ret = g_prev_smooth_shape.copy()
	if attrs['mirroring']:
		flip_order = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0, 26,25,24,23,22,21,20,19,18,17, 27,28,29,30, 35,34,33,32,31, \
			  45,44,43,42, 47,46, 39,38,37,36, 41,40, 54,53,52,51,50,49,48, 59,58,57,56,55, 64,63,62,61,60, 67,66,65, 69,68]
		ret = ret[flip_order]
	slider_names, slider_values = applyRetarget(g_rbfn, ret)
	#State._setKey('/root/sliders/attrs', dict(zip(slider_names, slider_values))) # NO UNDO
	slider_names.extend(['NeckPan','NeckTilt','NeckRoll'])
	slider_values = np.float32(list(slider_values)+list(g_head_pan_tilt_roll))

	return g_head_pan_tilt_roll.copy(),g_prev_vs.copy(),norm_shape,img,slider_names,slider_values,A
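
The head_pan_tilt_roll line above recovers roll from the 2x2 part of the stabilisation transform A as arctan2(-A[1][0], A[1][1]); in isolation, with a made-up rotation:

import numpy as np

theta = np.radians(15.0)
A = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]], dtype=np.float32)
roll = np.degrees(np.arctan2(-A[1, 0], A[1, 1]))  # -> -15.0 with this sign convention
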
Example #9
def animateHead(newFrame):
    global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames, extract
    global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, c3d_points
    global md, movies
    tony_geom.image, tony_geom.bindImage, tony_geom.bindId = ted_geom.image, ted_geom.bindImage, ted_geom.bindId  # reuse the texture!
    fo = 55
    MovieReader.readFrame(md, seekFrame=(newFrame + fo) // 2)
    view = QApp.view()
    for ci in range(0, 4):
        view.cameras[ci + 1].invalidateImageData()
    ci = view.cameras.index(view.camera) - 1
    if ci >= 0:
        MovieReader.readFrame(movies[ci], seekFrame=newFrame + fo)  # only update the visible camera
    frac = (newFrame % 200) / 100.0
    if frac > 1.0: frac = 2.0 - frac
    fi = newFrame % len(c3d_frames)

    if ted_skel:  # move the skeleton
        dofs = ted_anim['dofData'][fi * 2 - 120]
        Character.pose_skeleton(ted_skel['Gs'], ted_skel, dofs)
        ted_glskel.setPose(ted_skel['Gs'])
        offset = ted_skel['Gs'][13]  # ted_skel['jointNames'].index('VSS_Head')

        cams = QApp.app.getLayers()['cameras']
        tmp = np.eye(4, 4, dtype=np.float32)
        tmp[:3, :] = offset
        cams.setTransform(tmp)

        if ci >= 0:  # move the camera view to be correct
            camRT = mats[ci][1]
            RT = np.dot(camRT, np.linalg.inv(tmp))
            view.cameras[ci + 1].setRT(RT)

        # update the face geometries to fit the skeleton
        ted_geom.setPose(offset.reshape(1, 3, 4))
        tony_geom.setPose(offset.reshape(1, 3, 4))
        #TODO head_points,c3d_points,surface_points,ted_geom2

    frame = c3d_frames[fi][extract]
    which = np.where(frame[:, 3] == 0)[0]
    x3ds = frame[which, :3]
    #print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
    bnds = np.array([[0, 1]] * ted_lo_mat.shape[0], dtype=np.float32)
    tony_shape_vector[:] = OBJReader.fitLoResShapeMat(ted_lo_rest,
                                                      ted_lo_mat,
                                                      x3ds,
                                                      Aoffset=10.0,
                                                      Boffset=3.0,
                                                      x_0=tony_shape_vector,
                                                      indices=which,
                                                      bounds=bnds)
    #global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]

    #tony_shape_vector *= 0.
    #tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
    if 1:
        ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1, 3)
    else:
        ted_shape_v = np.zeros_like(ted_obj['v'])
        ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
    tony_shape_v = ted_shape_v
    #tony_shape_v = tony_shape['v']*frac
    ted_geom.setVs(ted_obj['v'] + ted_shape_v)  #ted_shape['v'] * frac)
    tony_geom.setVs(tony_obj['v'] + tony_shape_v -
                    np.array([200, 0, 0], dtype=np.float32))
    ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) +
                    tony_tedtopo_obj['v'] * frac +
                    np.array([200, 0, 0], dtype=np.float32))
    #if len(ted_shape_v) == len(tony_shape_v):
    #	tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
    #	diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])

    #print [c3d_labels[i] for i in which]
    surface_points.vertices = np.dot(ted_lo_mat.T,
                                     tony_shape_vector).T + ted_lo_rest
    surface_points.colour = [0, 1, 0, 1]  # green
    c3d_points.vertices = x3ds
    c3d_points.colour = [1, 0, 0, 1]  # red

    QApp.app.refreshImageData()
    QApp.app.updateGL()
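
frac above is a triangle wave with a 200-frame period, used to ping-pong the blend between the two meshes; as a standalone function:

def triangle01(frame, period=200):
    frac = (frame % period) / (period / 2.0)
    return 2.0 - frac if frac > 1.0 else frac  # rises 0 -> 1, then falls back to 0
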
Example #10
    def cook(self, location, interface, attrs):
        if not self.initialised: return
        self.frame = interface.frame()
        imgs = []

        offset = attrs.get('offset', 0)
        stepSize = attrs.get('step', 1)

        # Check if we are looking through a single active camera or not as that will be more efficient.
        # Here we are not interested in knowing whether or not we found anything
        activeCameraIdx = interface.attr('activeCameraIdx',
                                         atLocation=interface.root(),
                                         log=False)
        if (attrs.get('onlyActiveCamera') and activeCameraIdx is not None
                and activeCameraIdx != -1):
            frameNum = max(
                (self.frame + offset + self.timecodeOffsets[activeCameraIdx]) *
                stepSize, 0)
            md = self.movies[activeCameraIdx]

            try:
                MovieReader.readFrame(md,
                                      seekFrame=frameNum,
                                      playingAudio=False)
            except Exception:
                self.logger.error(
                    'Could not read frame: %d for active camera %d' %
                    (self.frame, activeCameraIdx))
                return

            img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(
                md['vheight'], md['vwidth'], 3)
            imgs.append(img)

        else:
            # Process all cameras (slower but necessary for processes/Ops that need all the data)
            for ci, md in enumerate(self.movies):
                try:
                    frameNum = max(
                        (self.frame + offset + self.timecodeOffsets[ci]) *
                        stepSize, 0)
                    MovieReader.readFrame(md,
                                          seekFrame=frameNum,
                                          playingAudio=False)
                    img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(
                        md['vheight'], md['vwidth'], 3)
                    imgs.append(img)

                except Exception:
                    self.logger.error(
                        'Could not read frame: %d for camera %d' %
                        (self.frame, ci))
                    return

        self.attrs['imgs'] = imgs
        interface.createChild(interface.name(),
                              'cameras',
                              atLocation=interface.parentPath(),
                              attrs=self.attrs)

        if self.timecode: interface.setAttr('timecode', self.timecode)
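
frameNum above maps the cook's timeline frame to a per-camera movie frame using that camera's timecode offset and a step size, clamped at zero; the arithmetic in isolation:

def movie_frame(timeline_frame, tc_offset, offset=0, step=1):
    return max((timeline_frame + offset + tc_offset) * step, 0)

movie_frame(100, -12)  # -> 88
movie_frame(5, -12)    # -> 0 (clamped)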