def getActiveCameraIndex():
    """Return the index of the active camera within the view's camera list,
    offset by -1 (the first slot is not a movie camera); None if it cannot
    be determined."""
    view = QApp.view()
    try:
        return view.cameras.index(view.camera) - 1
    except Exception as e:
        renderLogger.error('Unable to determine active camera index: %s', str(e))
        return None
def process_frame(deinterlacing, detectingWands, frame, opts, pair):
    """Fetch one movie frame for one camera, detect dark/light dots in it and
    (optionally) label a T-wand in the light points.

    :param deinterlacing: passed through to get_movie_frame
    :param detectingWands: when truthy, run ISCV.label_T_wand on the light points
                           and paint red boxes over the labelled detections
    :param frame: frame number to fetch
    :param opts: detection parameter dict (from 'dotParams/attrs' — see caller)
    :param pair: (camera_index, movie_dict) tuple
    :return: (pts0, pts1) — dark and light point arrays (empty lists if disabled)
    """
    ci, md = pair
    img = get_movie_frame(md, frame, deinterlacing)
    #data = filter_movie_frame(img, small_blur, large_blur)
    #img, data = get_processed_movie_frame(md, frame, small_blur, large_blur, deinterlacing)
    # Force the camera's texture to be refreshed from the new frame.
    QApp.view().cameras[ci + 1].invalidateImageData()
    # NOTE: the triple-quoted string below is commented-out debug code, kept as-is.
    """
    if 1: # show the filtered image
        img[:] = data
        pass
    if 0: # crush the image to see the blobs
        lookup = np.zeros(256, dtype=np.uint8)
        lookup[threshold_bright:] = 255
        lookup[255 - threshold_dark_inv:threshold_bright] = 128
        img[:] = lookup[img]
    """
    if 1:
        good_darks, pts0, good_lights, pts1, data = get_dark_and_light_points(img, frame, ci, opts)
        if 1:  # show the filtered image (overwrite the displayed frame with the filter output)
            #print "data before insertion", type(data), data.shape
            #sys.exit(0)
            img[:] = data
        if 0:  # crush the image to see the blobs
            # NOTE(review): threshold_bright/threshold_dark_inv are module globals here,
            # unlike the opts[...] keys used elsewhere — confirm before enabling.
            lookup = np.zeros(256, dtype=np.uint8)
            lookup[threshold_bright:] = 255
            lookup[255 - threshold_dark_inv:threshold_bright] = 128
            img[:] = lookup[img]
        # good_darks, pts0 = Detect.detect_dots(255-data, opts['threshold_dark_inv'], opts)
        # good_lights,pts1 = Detect.detect_dots(data, opts['threshold_bright'], opts)
        print ci, frame, len(pts0), len(pts1), 'good points (darks,lights)'
        if detectingWands:
            # T-wand labelling thresholds (tuned constants).
            ratio = 2.0
            x2d_threshold = 0.5
            straightness_threshold = 0.01 * 2
            match_threshold = 0.07 * 2
            x2ds_labels = -np.ones(pts1.shape[0], dtype=np.int32)
            x2ds_splits = np.array([0, pts1.shape[0]], dtype=np.int32)
            ISCV.label_T_wand(pts1, x2ds_splits, x2ds_labels, ratio, x2d_threshold,
                              straightness_threshold, match_threshold)
            print x2ds_labels
            for r, li in zip(good_lights, x2ds_labels):
                if li != -1:
                    # make some red boxes around labelled wand points
                    dx, dy = 10, 10
                    img[int(r.sy - dy):int(r.sy + dy), int(r.sx - dx):int(r.sx + dx), 0] = 128
    else:
        pts0 = pts1 = []
    return (pts0, pts1)
def set_frame_cb(fi):
    """Timeline callback: store the new frame number in the undo-able state
    and refresh the image, markup mesh and menus."""
    # TODO should be part of the callback?
    if frame_number() != fi:
        State.setKey('/frame_number', fi)
        State.push('change frame')
    view = QApp.view()
    update_gui_image(view, get_frame_image(fi))
    update_markup_mesh(view)
    # Trigger a full refresh here (TODO: possibly not needed).
    QApp.app.updateMenus()
def set_frame_cb(frame):
    """Seek the global movie to *frame* and push the decoded image to the
    image layer, then repaint the view."""
    global md
    readFrame(md, seekFrame=frame)
    # View the raw RGB movie buffer as an (height, width, 3) uint8 array without copying.
    frame_img = np.frombuffer(md['vbuffer'], dtype=np.uint8)
    frame_img = frame_img.reshape(md['vheight'], md['vwidth'], 3)
    QApp.app.getLayer('image_mesh').setImage(frame_img)
    active_view = QApp.view()
    active_view.refreshImageData()
    active_view.updateGL()
def set_frame_cb(frame):
    """AAM-inspection callback: each source frame index is shown in 4 modes,
    selected by frame % 4:
      0 - original image + stored shape/texture
      1 - original image, shape re-tracked with the predictor
      2 - AAM model's stored shape/texture for this sample
      3 - shape/texture reconstructed from the AAM PCA coefficients
    """
    global g_aam_model, g_images, g_shapes, g_predictor, template_vs
    indx = frame/4   # sample index (py2 integer division)
    which = frame%4  # display mode
    img = np.zeros((160,160,3),dtype=np.uint8)
    if which == 0:
        img = g_images[indx].copy()
        shp = g_shapes[indx].copy()
        # NOTE(review): 'textures' is a module-level name not declared global here — confirm it exists.
        tex = textures[indx]
    elif which == 1:
        img = g_images[indx].copy()
        shp = g_shapes[indx].copy()
        tex = textures[indx]
        shp = Face.track_face(img, g_predictor, shp)
    elif which == 2:
        shp = g_aam_model['shapes'][indx]
        tex = g_aam_model['textures'][indx]
    else:
        # Reconstruct shape and texture from PCA basis: mean + u*s . vt.
        shp = g_aam_model['ref_shape'] + np.dot(g_aam_model['shapes_u'][indx]*g_aam_model['shapes_s'],\
            g_aam_model['shapes_vt']).reshape(-1,2)
        tex = g_aam_model['texture_mean'] + np.dot(g_aam_model['texture_u'][indx]*g_aam_model['texture_s'],\
            g_aam_model['texture_vt']).reshape(-1,3)
    width,height = img.shape[1],img.shape[0]
    w,h = width*0.5,height*0.5
    if which >= 2:
        # Model-space shapes are normalized; map back to pixel coordinates.
        # NOTE(review): 'model' here differs from g_aam_model — presumably a module
        # global holding ref_pinv/model_indices/model_weights; verify.
        shp = Face.normalize_shape(shp, model['ref_pinv'])
        shp = (shp + 1) * [w,h]
    np.clip(tex,0,255,out=tex)
    Face.render_texture(tex, img, shp, model['model_indices'], model['model_weights'])
    geo_mesh = QApp.app.getLayer('geo_mesh')
    # 68 landmark points plus 4 extra boundary points.
    size = 68+4
    vs = np.zeros((size,3),dtype=np.float32)
    vs[:size-4,:2] = shp
    vs[size-4:size,:2] = Face.get_boundary(shp, template_vs)
    geo_mesh.setVs(vs)
    # Centre the mesh over the image and lift it slightly in z.
    geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[-w,1000-h,0.1]]
    image_mesh = QApp.app.getLayer('image_mesh')
    image_mesh.setVs(np.array([[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]], dtype=np.float32))
    image_mesh.setImage(img)
    QApp.view().updateGL()
def dirtyCB(dirty):
    """Synchronise the app with a state change: *dirty* holds the changed
    state keys. Cached models are invalidated and the markup/ref layers are
    refreshed for the keys they depend on.

    TODO this is the correct place to deal with changes due to e.g. menu
    items or hot keys; updateGL calls elsewhere could then be removed.
    """
    global g_predictor, g_aam_model
    if dirty:
        outliner = QApp.app.qoutliner
        # TODO this causes a total rebuild of the outliner tree.
        outliner.set_root(outliner.root)
    if '/predictor' in dirty:
        g_predictor = None  # drop cache; reloaded lazily on next use
    if '/aam' in dirty:
        g_aam_model = None
    if '/vnames' in dirty:
        QApp.view().getLayer('markup_mesh').names = State.getKey('/vnames', [])
    if '/markup_mesh_sel' in dirty:
        QApp.view().getLayer('markup_mesh').selectedIndex = State.getKey('/markup_mesh_sel', -1)
    if '/edges' in dirty:
        QApp.view().getLayer('markup_mesh').edges = State.getKey('/edges', None)
        QApp.view().getLayer('ref_mesh').edges = State.getKey('/edges', None)
def intersectRaysCB(fi, raw_frames, mats, primitives, primitives2D, track3d):
    """Per-frame callback: push the frame's 2D centroids to the display and
    advance (or re-boot) the 3D tracker, then repaint."""
    global prev_frame
    # A jump of more than 10 frames — or the very first call — forces a tracker re-boot.
    jumped = prev_frame is None or abs(fi - prev_frame) > 10
    prev_frame = fi
    view = QApp.view()
    points, _altpoints = primitives
    dets, splits = GiantReader.frameCentroidsToDets(raw_frames[fi], None)
    # frame = x2d_frames[fi]
    primitives2D[0].setData(dets, splits)
    step = track3d.boot if jumped else track3d.push
    x3ds, _x3ds_labels = step(dets, splits)
    points.setData(x3ds)
    view.updateGL()
def rbfn_view_cb(fi, attrs):  # g_mode = 1
    """RBFN-pose browsing callback: decode the stored pose image for frame
    *fi*, stabilise its shape, derive head angles and retargeted slider
    values.

    :param fi: global pose index (resolved to group/pose via rbfn_info_from_frame)
    :param attrs: UI attrs dict; only 'mirroring' is read here
    :return: (new_pose, out_shape, norm_shape, img, slider_names, slider_values, A)
    """
    global g_rbfn
    group,gn,pn,slider_indices,slider_names,pose_splits = rbfn_info_from_frame(fi)
    # Show the group and pose names as an on-screen overlay.
    QApp.view().displayText = [(10, 100, gn), (10,125, pn)]
    img = group['images'][pn]
    img = JPEG.decompress(img)
    h,wm = img.shape[0]*0.5,img.shape[1]*0.5
    out_shape = extract_x2ds(group, pn, g_rbfn['marker_names'])
    svs = group['slider_data'][pn][slider_indices]
    State._setKey('/root/sliders/attrs', dict(zip(slider_names, svs))) # NO UNDO
    # compensate for roll, translation and scale
    norm_shape, head_pan, head_tilt, A = stabilize_shape(out_shape)
    # extract angles from the measured values
    mirror_scale = -1 if attrs['mirroring'] else 1
    new_pose = np.degrees(np.arctan2([head_pan*mirror_scale, head_tilt, -mirror_scale*A[1][0]],[2,2,A[1][1]]))
    head_roll = -np.arctan2(A[1][0],A[1][1])
    head_pan = np.arctan2(head_pan, 2.0)
    head_tilt = np.arctan2(head_tilt, 2.0)
    #print head_roll, head_pan, head_tilt
    slider_names, slider_values = applyRetarget(g_rbfn, norm_shape)
    # Snap near-zero slider values to exactly zero.
    svs[np.where(svs < 1e-4)] = 0
    slider_values[np.where(slider_values < 1e-4)] = 0
    #print zip(slider_values,svs)
    slider_names.extend(['NeckRoll','NeckPan','NeckTilt'])
    svs = np.clip(svs,0,1)
    # NOTE(review): the returned slider_values use the *stored* svs, not the
    # retargeted slider_values computed above — presumably intentional for this
    # view mode; confirm.
    slider_values = np.float32(list(svs)+list(np.degrees([head_roll,head_pan,head_tilt])))
    return new_pose,out_shape,norm_shape,img,slider_names,slider_values,A
def set_frame_cb2(frame):
    """Face-tracking display callback: fetch the frame image (movie or jpgs),
    optionally preprocess it (all preprocessing branches are disabled with
    `if 0:`), track the face landmarks, and update the geo/image meshes.

    The tracker is re-booted via detect_face when there is no previous shape;
    the previous shape is kept between calls in g_prev_vs for movie mode.
    """
    global g_predictor, g_predictor_dlib, g_detector
    # 68 landmark points (ref_shape) plus 4 extra boundary points.
    size = (len(g_predictor['ref_shape'])+4)
    geo_vs = np.zeros((size,3), dtype=np.float32)
    ref_vs = np.zeros((size,3), dtype=np.float32)
    global g_prev_vs
    try: g_prev_vs
    except: g_prev_vs = None  # first call: create the global
    if 0: # show_images
        global g_jpgs; fn = g_jpgs[frame%len(g_jpgs)]
        img = Face.load_image(fn)
        img = Face.fix_image(img, max_size=640)
        use_prev_vs = False # images need booting every frame
    else: # show_movies
        global md; MovieReader.readFrame(md, seekFrame=frame) # only update the visible camera
        img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
        use_prev_vs = True
    if 0: # undistort_stuff
        global g_screen
        global g_tid, g_bid
        g_tid,g_bid = Opengl.bind_streaming_image(img, g_tid, g_bid)
        img = Opengl.renderGL(img.shape[1], img.shape[0], Opengl.quad_render, (g_tid, g_screen, 0.85))
        #Opengl.unbind_image(bid)
    if 0: # rotated_image
        img = img.transpose((1,0,2)).copy()
    if 0: # gamma_image
        lookup = np.array([int(((x/255.0)**0.4545)*255.0) for x in range(256)], dtype=np.uint8)
        img = lookup[img]
    #img[:,600:1000] = 0 #img[:,200:600].copy()
    if 0: # test_rotate
        import scipy; img = scipy.misc.imrotate(img, frame, interp='bilinear')
    if 0: # test_rotate_right
        import scipy; img[:,-img.shape[0]:] = scipy.misc.imrotate(img[:,-img.shape[0]:], frame, interp='bilinear')
    if 0: # test_filter_image
        img = ISCV.filter_image(img,4,16)
    w,h = img.shape[1]*0.5,img.shape[0]*0.5
    # Boot from the previous frame's shape if we have one, else run a fresh detect.
    boot = g_prev_vs
    if boot is None: boot = Face.detect_face(img, g_predictor, 2) # ,-1) # put -1 at end to boot at any angle
    tmp = Face.track_face(img, g_predictor, boot)
    if use_prev_vs and boot is not None: g_prev_vs = tmp
    # Drop the cached shape if tracking looks lost, forcing a re-detect next frame.
    if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
    global template_vs
    geo_vs[:size-4,:2] = tmp
    geo_vs[size-4:size,:2] = Face.get_boundary(geo_vs[:size-4,:2], template_vs)
    if 0: # show_aam
        global g_aam_model
        shape_u, tex_u, A_inv, mn = Face.fit_aam(g_aam_model, tmp, img)
        Face.render_aam(g_aam_model, A_inv*0.1, mn*0.1, shape_u, tex_u, img)
        su,tu = Face.normalized_aam_coords(g_aam_model, shape_u, tex_u)
        res = Face.aam_residual(g_aam_model, tmp, img)
        QApp.view().displayText = [(10,100,'%f' % np.linalg.norm(tu)),(10,125,'%f' % np.linalg.norm(su)),(10,150,'%f'%res)]
    if 0: # show_extracted_texture
        global g_aam_model_indices,g_aam_model_weights
        pixels = Face.extract_texture(img, geo_vs[:size,:2], g_aam_model_indices, g_aam_model_weights)
        global template_vs
        Face.render_texture(pixels, img, template_vs, g_aam_model_indices, g_aam_model_weights)
    geo_mesh = QApp.app.getLayer('geo_mesh')
    geo_mesh.setVs(geo_vs)
    # Centre the mesh over the image; small z offset keeps it in front.
    geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[-w,1000-h,0.1]]
    image_mesh = QApp.app.getLayer('image_mesh')
    image_mesh.setVs(np.array([[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]], dtype=np.float32))
    image_mesh.setImage(img)
    QApp.view().updateGL()
def animateHead(newFrame):
    """Per-frame animation callback: advance the movies, optionally pose the
    skeleton, fit the lo-res blendshape vector to the frame's C3D points and
    update the ted/tony face geometries and debug point clouds.
    """
    global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames, extract
    global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, c3d_points
    global md, movies
    tony_geom.image, tony_geom.bindImage, tony_geom.bindId = ted_geom.image, ted_geom.bindImage, ted_geom.bindId # reuse the texture!
    # Frame offset between the animation and the movies.
    fo = 55
    MovieReader.readFrame(md, seekFrame=((newFrame + fo) / 2))
    view = QApp.view()
    for ci in range(0, 4):
        view.cameras[ci + 1].invalidateImageData()
    ci = view.cameras.index(view.camera) - 1
    if ci >= 0: MovieReader.readFrame(movies[ci], seekFrame=(newFrame + fo)) # only update the visible camera
    # Triangle-wave blend factor cycling 0->1->0 every 200 frames.
    frac = (newFrame % 200) / 100.
    if (frac > 1.0): frac = 2.0 - frac
    fi = newFrame % len(c3d_frames)
    if ted_skel: # move the skeleton
        dofs = ted_anim['dofData'][fi * 2 - 120]
        Character.pose_skeleton(ted_skel['Gs'], ted_skel, dofs)
        ted_glskel.setPose(ted_skel['Gs'])
        offset = ted_skel['Gs'][13] # ted_skel['jointNames'].index('VSS_Head')
        cams = QApp.app.getLayers()['cameras']
        tmp = np.eye(4, 4, dtype=np.float32)
        tmp[:3, :] = offset
        cams.setTransform(tmp)
        if ci >= 0: # move the camera view to be correct
            camRT = mats[ci][1]
            RT = np.dot(camRT, np.linalg.inv(tmp))
            view.cameras[ci + 1].setRT(RT)
        # update the face geometries to fit the skeleton
        ted_geom.setPose(offset.reshape(1, 3, 4))
        tony_geom.setPose(offset.reshape(1, 3, 4))
        #TODO head_points,c3d_points,surface_points,ted_geom2
    frame = c3d_frames[fi][extract]
    # Rows with frame[:,3] == 0 are the valid markers (column 3 presumably an
    # occlusion/validity flag — confirm against the C3D reader).
    which = np.where(frame[:, 3] == 0)[0]
    x3ds = frame[which, :3]
    #print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
    # Fit the blendshape weight vector (bounded to [0,1]) to the marker positions.
    bnds = np.array([[0, 1]] * ted_lo_mat.shape[0], dtype=np.float32)
    tony_shape_vector[:] = OBJReader.fitLoResShapeMat(ted_lo_rest, ted_lo_mat, x3ds, Aoffset=10.0, Boffset=3.0, x_0=tony_shape_vector, indices=which, bounds=bnds)
    #global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]
    #tony_shape_vector *= 0.
    #tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
    if 1:
        ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1, 3)
    else:
        ted_shape_v = np.zeros_like(ted_obj['v'])
        ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
    tony_shape_v = ted_shape_v
    #tony_shape_v = tony_shape['v']*frac
    ted_geom.setVs(ted_obj['v'] + ted_shape_v) #ted_shape['v'] * frac)
    # Side-by-side layout: tony offset left, morphing ted2 offset right.
    tony_geom.setVs(tony_obj['v'] + tony_shape_v - np.array([200, 0, 0], dtype=np.float32))
    ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) + tony_tedtopo_obj['v'] * frac + np.array([200, 0, 0], dtype=np.float32))
    #if len(ted_shape_v) == len(tony_shape_v):
    #	tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
    #	diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])
    #print [c3d_labels[i] for i in which]
    surface_points.vertices = np.dot(ted_lo_mat.T, tony_shape_vector).T + ted_lo_rest
    surface_points.colour = [0, 1, 0, 1] # green
    c3d_points.vertices = x3ds
    c3d_points.colour = [1, 0, 0, 1] # red
    QApp.app.refreshImageData()
    QApp.app.updateGL()
def setFrame_cb(fi):
    """Main per-frame UI callback: handles mode switching (track view vs RBFN
    view), runs the active view callback, positions the geo/image/Harpy
    meshes and optionally streams blendshape data to the TIS server.

    Guarded by g_setting_frame because changing the timeline range can
    re-enter this function.
    """
    attrs = State.getKey('/root/ui/attrs/')
    global g_setting_frame
    if g_setting_frame: return  # re-entrancy guard
    g_setting_frame = True
    try: # within this loop we handle the timeline, which could trigger calling this function recursively
        global g_mode, g_frame, g_TIS_server, g_neutral_corrective_shape
        global g_smooth_pose
        view = QApp.view()
        cid = view.cameraIndex()
        if cid != g_mode: # deal with changing modes
            g_mode = cid
            if g_mode == 0:
                if g_md is not None: QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
            elif g_mode == 1:
                pose_splits = rbfn_pose_splits()
                QApp.app.qtimeline.setRange(0, pose_splits[-1]-1)
            # Restore the frame this mode was last on.
            new_frame = g_frame.get(g_mode,fi)
            if new_frame != fi:
                QApp.app.qtimeline.frame = new_frame
                fi = new_frame
    except Exception as e:
        print 'exc setFrame',e
    g_setting_frame = False
    g_frame[g_mode] = fi
    if not attrs['setting_neutral']: g_neutral_corrective_shape = 0
    # Dispatch to the active view callback: 0 = live tracking, 1 = RBFN poses.
    new_pose,new_shape,norm_shape,img,slider_names,slider_values,A = [track_view_cb,rbfn_view_cb][g_mode](fi,attrs)
    mirror_scale = -1 if attrs['mirroring'] else 1
    h,wm = img.shape[0]*0.5,img.shape[1]*0.5*mirror_scale
    geo_vs = np.zeros((new_shape.shape[0],3), dtype=np.float32)
    if attrs['debugging']: # display the stabilised data
        geo_vs[:,:2] = norm_shape
        geo_vs *= 200
        geo_vs[:,:2] += np.int32(np.mean(new_shape, axis=0)/200)*200
    else: # display the tracking data
        geo_vs[:,:2] = new_shape
    geo_mesh,image_mesh,bs_mesh = QApp.app.getLayers(['geo_mesh', 'image_mesh', 'bs_mesh'])
    bs_mesh.visible = attrs['show_harpy']
    if bs_mesh.visible:
        global g_bs_vs, g_bs_shape_mat_T
        # Drive the Harpy blendshape mesh from the (clamped) slider values;
        # the last 3 sliders are the neck angles, excluded here.
        bs_mesh.setVs(g_bs_vs + np.dot(g_bs_shape_mat_T, np.clip(slider_values[:-3],0,1)))
        # compute the Harpy position
        R = Calibrate.composeR(new_pose*[1,-1,-1])
        if g_mode == 1: R = np.eye(3) # TODO
        bs_ts = Calibrate.composeRT(R,[0,1720,0],0) # compensate for the offset of the Harpy (temples ~1720mm above origin)
        scale = 1.0/np.linalg.norm(160.*A) # IPD (64mm) / 0.4 (ref_shape) = 160.
        off = np.mean(new_shape[[0,16]],axis=0) # get the position of the temples (pixels)
        g_smooth_pose[g_mode] = filter_data(np.float32([scale,off[0],off[1]]), g_smooth_pose.setdefault(g_mode,None), 10.0)
        pose = g_smooth_pose[g_mode]
        bs_ts[:3] *= pose[0]
        bs_ts[:3,3] += [pose[1]-abs(wm),1000+pose[2]-h,0] # offset screen-right 300mm
        bs_ts[:3,3] += (pose[0]*attrs['harpy_xoffset'])*np.float32([np.cos(np.radians(view.camera.cameraRoll)),-np.sin(np.radians(view.camera.cameraRoll)),0.0])
        bs_mesh.transforms[0] = bs_ts.T
    geo_mesh.setVs(geo_vs)
    # Red while streaming, green otherwise.
    geo_mesh.colour=[0 if attrs['streaming_TIS'] else 1,1 if attrs['streaming_TIS'] else 0,0,1]
    geo_mesh.transforms[0][:,:3] = [[mirror_scale,0,0],[0,1,0],[0,0,1],[-wm,1000-h,0.1]]
    image_mesh.setVs(np.float32([[-wm,-h,0],[wm,-h,0],[wm,h,0],[-wm,h,0]]))
    image_mesh.setImage(img)
    if attrs['unreal']:
        if not attrs['streaming_TIS']: toggle_unreal()
        ret, activeConnections = g_TIS_server.WriteAll(PyTISStream.getBlendshapeData(slider_names, slider_values))
        if not ret:
            print "Server is not Initialised"
            State._setKey('/root/ui/attrs/streaming_TIS', False)
    else: # Turn off streaming
        if attrs['streaming_TIS']: toggle_unreal()
    QApp.app.updateGL()
def cb(frame):
    """Kinect per-frame callback: grab video + registered depth, build a 3D
    point mesh from depth, track the face, align the tracked shape to a
    running 3D reference, and update the display meshes.
    """
    global g_record, g_frame
    g_frame = frame
    global g_camera_rays, g_camera_mat
    #print 'in cb'
    img = freenect.sync_get_video()[0]
    geom_mesh = QApp.app.getLayer('geom_mesh')
    geom_mesh.setImage(img)
    if 1:
        depths = freenect.sync_get_depth(format=freenect.DEPTH_REGISTERED)[0]
        #print 'depths',np.median(depths)
        if 0: # recording
            if frame not in g_record: return
            img, depths = g_record[frame]['video'], g_record[frame]['depths']
            g_record[frame] = {'video': img.copy(), 'depths': depths.copy()}
            if frame == 99: IO.save('dump', g_record)
        # Per-pixel count of valid (non-zero) depth samples; lookup maps a
        # count of contributing samples to the averaging weight 1/count.
        depths_sum = np.array(depths != 0, dtype=np.int32)
        lookup = np.array([0, 1, 0.5, 1.0 / 3, 0.25], dtype=np.float32)
        if 1: # average 2x2 blocks, ignoring missing samples
            depths_lo = np.array(depths[::2, ::2] + depths[1::2, ::2] + depths[::2, 1::2] + depths[1::2, 1::2], dtype=np.float32)
            depths_lo = depths_lo * lookup[(depths_sum[::2, ::2] + depths_sum[1::2, ::2] + depths_sum[::2, 1::2] + depths_sum[1::2, 1::2]).reshape(-1)].reshape(depths_lo.shape)
        else: # fullsize
            # NOTE(review): dead branch; depths_lo.shape is referenced before
            # depths_lo is assigned here — would raise if enabled.
            depths_lo = depths * lookup[depths_sum.reshape(-1)].reshape(depths_lo.shape)
        K, RT, P, ks, T, wh = g_camera_mat
        vs = depths_to_points(g_camera_rays, T, depths_lo)
        geom_mesh.setVs(vs.reshape(-1, 3))
    #QApp.view().setImage(img, img.shape[0], img.shape[1], img.shape[2])
    #camera = QApp.view().camera
    #geom_mesh.image = camera.image
    #geom_mesh.bindImage = camera.bindImage
    #geom_mesh.bindId = camera.bindId
    global g_predictor, reference_3d, geo_vs, geo_vts
    h, w, _3 = img.shape
    global g_prev_vs
    try: g_prev_vs
    except: g_prev_vs = None  # first call: create the global
    use_prev_vs = True
    if g_prev_vs is None:
        # Reset the reference landmarks from the predictor's canonical shape.
        reference_3d[:, :2] = g_predictor['ref_shape'] * [100, 100]
    tmp = Face.detect_face(img, g_predictor) if g_prev_vs is None else g_prev_vs
    tmp = Face.track_face(img, g_predictor, tmp)
    if use_prev_vs: g_prev_vs = tmp
    if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
    geo_vts[:len(tmp)] = tmp
    geo_vts[:, 1] = img.shape[0] - geo_vts[:, 1]  # flip y to image coordinates
    current_shape = geo_vts[:len(tmp)].copy()
    if 1:
        # Rigidly align the depth samples at the landmarks to the running 3D
        # reference, then blend the reference towards the aligned samples
        # (slow update everywhere, faster for inliers).
        ds = extract_depths(vs, current_shape * 0.5)
        M, inliers = Calibrate.rigid_align_points_inliers(ds, reference_3d, scale=True, threshold_ratio=5.0)
        ds = np.dot(ds, M[:3, :3].T) + M[:, 3]
        which = np.where(np.sum((reference_3d - ds)**2, axis=1) < 100 * 100)[0]
        reference_3d[which] = reference_3d[which] * 0.99 + ds[which] * 0.01
        reference_3d[inliers] = reference_3d[inliers] * 0.95 + ds[inliers] * 0.05
        ds[:] = reference_3d[:]
        M[1, 3] += 1000
        M[0, 3] -= 300
    else:
        M = np.eye(3, 4, dtype=np.float32)
        M[1, 3] += 1000
    geom_mesh.setPose(M.reshape(1, 3, 4))
    # Normalise the 2D shape: remove the similarity transform implied by
    # ref_pinv (svd gives the closest rotation+scale) and rescale by 100.
    ref_pinv = g_predictor['ref_pinv']
    xform = np.dot(ref_pinv, current_shape)
    ut, s, v = np.linalg.svd(xform)
    s = (s[0] * s[1])**-0.5
    xform_inv = np.dot(v.T, ut.T) * s
    current_shape = np.dot(current_shape - np.mean(current_shape, axis=0), xform_inv) * 100.
    geo_vs[:] = 0
    geo_vs[:len(current_shape), :2] = current_shape
    geo_vs[:70] = reference_3d
    #geo_vs[:68,:] += [0,100,5500]
    #print geo_vts[:4],w,h
    geo_mesh = QApp.app.getLayer('geo_mesh')
    geo_mesh.setVs(geo_vs, vts=geo_vts * np.array([1.0 / w, 1.0 / h], dtype=np.float32))
    geo_mesh.setImage(img)
    #geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[0,1000,0.1]]
    if 1:
        global g_model
        w, h = 160, 160
        shp = geo_vs[:68, :2]
        shape_u, tex_u, A_inv, mn = Face.fit_aam(g_model, tmp, img)
        Face.render_aam(g_model, A_inv * 0.5, mn * 0.5, shape_u, tex_u, img)
    img_mesh = QApp.app.getLayer('img_mesh')
    img_mesh.setImage(img)
    QApp.view().updateGL()
def setFrame(frame): global State, mats, movieFilenames, primitives global movies, primitives2D, deinterlacing, detectingWands, dot_detections, track3d, prev_frame, booting, trackGraph key = State.getKey('dotParams/attrs') skipping, prev_frame = (frame != prev_frame and frame - 1 != prev_frame), frame booting = 10 if skipping else booting - 1 p0, p1 = [], [] if True: #dot_detections is None: for pair in enumerate(movies): pts = process_frame(deinterlacing, detectingWands, frame, key, pair) p0.append(pts[0]) p1.append(pts[1]) def make_bounds(lens): return np.array([sum(lens[:x]) for x in xrange(len(lens) + 1)], dtype=np.int32) data0 = np.array(np.concatenate(p0), dtype=np.float32).reshape(-1, 2), make_bounds( map(len, p0)) data1 = np.array(np.concatenate(p1), dtype=np.float32).reshape(-1, 2), make_bounds( map(len, p1)) else: #dot_detections = movies_to_detections(movies, [frame], deinterlacing, key) data0, data1 = dot_detections[frame] if dot_detections.has_key( frame) else dot_detections.values()[0] for ci, md in enumerate(movies): try: MovieReader.readFrame(md, seekFrame=frame) except: print 'oops', frame return None, None #img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3) QApp.view().cameras[ci + 1].invalidateImageData() data0 = data0[0].copy(), data0[ 1] # so that undistort doesn't modify the raw detections data1 = data1[0].copy(), data1[1] # TODO, move this to the viewer... 
data0 = ViconReader.frameCentroidsToDets(data0, mats) data1 = ViconReader.frameCentroidsToDets(data1, mats) primitives2D[0].setData(data0[0], data0[1]) primitives2D[1].setData(data1[0], data1[1]) #print x2ds_labels if len(movieFilenames) is not 1: if 1: #x2ds_data, x2ds_splits = data0 # dark points only x2ds_data, x2ds_splits = data1 # light points only if skipping: x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits) #trackGraph = Label.TrackGraph() else: x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits) # coarse bounding box if False: for xi, x in zip(x3ds_labels, x3ds): if x[0] < -200 or x[0] > 200 or x[1] < 800 or x[ 1] > 1200 or x[2] < -50 or x[2] > 300: track3d.x2ds_labels[np.where( track3d.x2ds_labels == xi)[0]] = -1 x[:] = 0 primitives[0].setData(x3ds) #trackGraph.push(x3ds,x3ds_labels) #primitives[0].graph = trackGraph.drawing_graph() elif False: Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32) data = data0 # dark points #data = data1 # light points x3ds, x2ds_labels = Recon.intersect_rays(data[0], data[1], Ps, mats, tilt_threshold=0.003, x2d_threshold=0.02, x3d_threshold=5.0, min_rays=2) primitives[0].setData(x3ds) if detectingTiara: global c3d_frames frame = c3d_frames[(frame - 55) % len(c3d_frames)] which = np.where(frame[:, 3] == 0)[0] x3ds = frame[which, :3] #print frame,'len',len(x3ds) primitives[1].setData(x3ds) QApp.app.refreshImageData() QApp.app.updateGL()
def intersectRaysCB(fi):
    """Vicon per-frame callback: push 2D detections, boot/advance the 3D
    tracker, overlay ground-truth C3D points, pose the 'orn' and 'mar'
    skeletons from their animations and re-render their projected textures.
    """
    global x2d_frames, mats, Ps, c3d_frames, view, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, g_all_skels, md, orn_mapper, mar_mapper
    # A jump of more than 10 frames (or the first call) forces a tracker re-boot.
    skipping = prev_frame is None or np.abs(fi - prev_frame) > 10
    prev_frame = fi
    view = QApp.view()
    points, altpoints = primitives
    g2d = primitives2D[0]
    frame = x2d_frames[fi]
    x2ds_data, x2ds_splits = ViconReader.frameCentroidsToDets(frame, mats)
    g2d.setData(x2ds_data, x2ds_splits)
    if skipping:
        x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
        #trackGraph = Label.TrackGraph()
        boot = -10  # count up to 0 before attempting a skeleton boot
    else:
        x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
    if False:  # disabled: graph-based skeleton labelling/boot
        boot = boot + 1
        if boot == 0:
            x2d_threshold_hash = 0.01
            penalty = 10.0 # the penalty for unlabelled points. this number should be about 10. to force more complete labellings, set it higher.
            maxHyps = 500 # the number of hypotheses to maintain.
            print "booting:"
            numLabels = len(orn_graph[0])
            l2x = -np.ones(numLabels, dtype=np.int32)
            label_score = ISCV.label_from_graph(x3ds, orn_graph[0], orn_graph[1], orn_graph[2], orn_graph[3], maxHyps, penalty, l2x)
            clouds = ISCV.HashCloud2DList(x2ds_data, x2ds_splits, x2d_threshold_hash)
            which = np.array(np.where(l2x != -1)[0], dtype=np.int32)
            pras_score, x2d_labels, vels = Label.project_assign(clouds, x3ds[l2x[which]], which, Ps, x2d_threshold=x2d_threshold_hash)
            print fi, label_score, pras_score
            labelled_x3ds = x3ds[l2x[which]]
            print track_orn.bootPose(x2ds_data, x2ds_splits, x2d_labels)
        if boot > 0:
            track_orn.push(x2ds_data, x2ds_splits, its=4)
    #x3ds,x2ds_labels = Recon.intersect_rays(x2ds_data, x2ds_splits, Ps, mats, seed_x3ds = None)
    points.setData(x3ds)
    if c3d_frames != None:
        # C3D ground truth is offset by 832 frames and runs at half rate
        # (py2 integer division). Rows with flag column == 0 are valid.
        c3ds = c3d_frames[(fi - 832) / 2]
        true_labels = np.array(np.where(c3ds[:, 3] == 0)[0], dtype=np.int32)
        x3ds_true = c3ds[true_labels, :3]
        altpoints.setData(x3ds_true)
    ci = view.cameraIndex() - 1
    if True: #ci == -1:
        # Movie runs at quarter rate with a 14-frame offset — TODO confirm.
        MovieReader.readFrame(md, seekFrame=max((fi - 14) / 4, 0))
        QApp.app.refreshImageData()
    (orn_skel_dict, orn_t) = g_all_skels['orn']
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_skel_dict['chanValues'][:] = orn_anim_dict['dofData'][fi]
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)
    (mar_skel_dict, mar_t) = g_all_skels['mar']
    mar_anim_dict = mar_skel_dict['anim_dict']
    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t
    Character.updatePoseAndMeshes(mar_skel_dict, mar_skel_mesh, mar_geom_mesh, mar_anim_dict['dofData'][fi])
    from PIL import Image
    #orn_geom_mesh.setImage((md['vbuffer'],(md['vheight'],md['vwidth'],3)))
    #orn_geom_mesh.refreshImage()
    # Project the camera image onto the character geometries.
    w, h = 1024, 1024
    cam = view.cameras[0]
    cam.refreshImageData(view)
    aspect = float(max(1, cam.bindImage.width())) / float(cam.bindImage.height()) if cam.bindImage is not None else 1.0
    orn_mapper.project(orn_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, orn_mapper.render, cam.bindId)
    orn_geom_mesh.setImage(data)
    mar_mapper.project(mar_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, mar_mapper.render, cam.bindId)
    mar_geom_mesh.setImage(data)
    #image = Image.fromstring(mode='RGB', size=(w, h), data=data)
    #image = image.transpose(Image.FLIP_TOP_BOTTOM)
    #image.save('screenshot.png')
    if 0:
        global g_screen
        image = Opengl.renderGL(1920, 1080, Opengl.quad_render, (cam.bindId, g_screen))
        import pylab as pl
        pl.imshow(image)
        pl.show()
    view.updateGL()
def paintGL(self, p0=0, p1=None, drawOpts=DRAWOPT_ALL):
    '''Render this mesh with legacy fixed-function OpenGL plus shaders.

    :param p0: first primitive index to draw
    :param p1: one-past-last primitive index; None means draw to the end
               (when given, we are doing a selection pass and shaders are skipped)
    :param drawOpts: OR combination of draw flags. default is :data:`UI.DRAWOPT_ALL`
    '''
    #if not self.d['draw'] or not self.d['visible']: return
    doingSelection = (p1 is not None)
    if p1 is None: p1 = len(self)
    if not self.GL_is_initialised: self.initializeGL()
    if p1 == 0: return # don't render if no vertices
    # Lazily (re)bind the texture when the image changed since last bind.
    if self.image != self.bindImage:
        if self.image == []:
            # [] is the sentinel for "load from imageFilename on first use".
            self.image = QtGui.QPixmap(self.imageFilename).toImage()
            self.imageFlipped = False
        if self.bindImage is not None:
            self.deleteTexture(self.bindId)
            self.bindId,self.bindImage = long(0),None
        if self.image is not None:
            global win
            if self.view == None:
                from UI import QApp; self.view = QApp.view() # TODO
            self.bindId = self.view.bindTexture(self.image)
            self.bindImage = self.image
    if self.bindImage is not None:
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self.bindId)
    GL.glEnable(GL.GL_BLEND)
    GL.glEnable(GL.GL_CULL_FACE)
    GL.glCullFace(GL.GL_BACK)
    GL.glFrontFace(GL.GL_CCW)
    GL.glEnable(GL.GL_LIGHTING)
    GL.glEnable(GL.GL_LIGHT0)
    Pmat = GL.glGetDoublev(GL.GL_PROJECTION_MATRIX)
    lightDir = -Pmat[:3,2] # the direction the camera is looking
    GL.glLightfv(GL.GL_LIGHT0, GL.GL_POSITION, lightDir)
    GL.glShadeModel(GL.GL_SMOOTH)
    if self.colour is not None: GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, self.colour)
    # Bind the vertex/texcoord/normal client arrays that exist.
    if self.vs is not None:
        GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
        self.vs.bind()
        GL.glVertexPointerf(self.vs)
    if self.vts is not None:
        GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
        self.vts.bind()
        GL.glTexCoordPointerf(self.vts)
    if self.vns is not None:
        GL.glEnableClientState(GL.GL_NORMAL_ARRAY)
        self.vns.bind()
        GL.glNormalPointerf(self.vns)
    # Untransformed triangles. Offsets are in bytes: 3 uint32 indices = 12 bytes per tri.
    if self.tris is not None and DRAWOPT_GEOMS & drawOpts:
        if not doingSelection: GL.glUseProgram(self.shader2)
        self.tris.bind()
        if self.drawStyle == 'wire':
            GL.glShadeModel(GL.GL_FLAT)
            GL.glLineWidth(1)
            GL.glDrawElements(GL.GL_LINES, (p1-p0)*3, GL.GL_UNSIGNED_INT, self.tris + p0*12)
            GL.glShadeModel(GL.GL_SMOOTH)
        elif self.drawStyle == 'smooth':
            GL.glDrawElements(GL.GL_TRIANGLES, (p1-p0)*3, GL.GL_UNSIGNED_INT, self.tris + p0*12)
            #GL.glDrawElementsui(GL.GL_TRIANGLES, self.tris)
        elif self.drawStyle == 'wire_over_smooth':
            GL.glDrawElements(GL.GL_TRIANGLES, (p1-p0)*3, GL.GL_UNSIGNED_INT, self.tris + p0*12)
            #GL.glDrawElementsui(GL.GL_TRIANGLES, self.tris)
            GL.glShadeModel(GL.GL_FLAT)
            GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, [0,0,0,1])
            GL.glLineWidth(1)
            GL.glDrawElementsui(GL.GL_LINES, self.tris)
            GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, self.colour)
            GL.glShadeModel(GL.GL_SMOOTH)
        self.tris.unbind()
    # Skinned/transformed triangles: per-vertex transform indices (vtis)
    # select a matrix from self.transforms inside the shader.
    if self.transformData is not None and self.transforms is not None and DRAWOPT_GEOMS & drawOpts:
        if not doingSelection: GL.glUseProgram(self.shader)
        GL.glUniformMatrix4fv(self.shader_myMat, len(self.transforms), GL.GL_FALSE, self.transforms) # put the transforms in myMat
        GL.glEnableVertexAttribArray(self.shader_bi)
        self.vtis.bind()
        GL.glVertexAttribIPointer(self.shader_bi, 1, GL.GL_UNSIGNED_INT, 0, self.vtis) # write the vtis to bi
        self.tis.bind()
        if self.drawStyle == 'wire':
            GL.glShadeModel(GL.GL_FLAT)
            GL.glLineWidth(1)
            GL.glDrawElementsui(GL.GL_LINES, self.tis) # this is wrong
            GL.glShadeModel(GL.GL_SMOOTH)
        elif self.drawStyle == 'smooth':
            GL.glDrawElementsui(GL.GL_TRIANGLES, self.tis)
        elif self.drawStyle == 'wire_over_smooth':
            GL.glDrawElementsui(GL.GL_TRIANGLES, self.tis)
            GL.glShadeModel(GL.GL_FLAT)
            GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, [0,0,0,1])
            GL.glLineWidth(1)
            GL.glDrawElementsui(GL.GL_LINES, self.tis)
            GL.glMaterialfv(GL.GL_FRONT, GL.GL_AMBIENT_AND_DIFFUSE, self.colour)
            GL.glShadeModel(GL.GL_SMOOTH)
        self.tis.unbind()
        self.vtis.unbind()
        #for ti,tris in self.transformData:
        #	if len(tris)==0: continue
        #	GL.glUniformMatrix4fv(self.myMat, 1, GL.GL_FALSE, self.transforms[ti])
        #	GL.glDrawElementsui(self.drawStyle, tris)
    # Restore GL state.
    if self.vs is not None:
        self.vs.unbind()
        GL.glDisableClientState(GL.GL_VERTEX_ARRAY)
    if self.vts is not None:
        self.vts.unbind()
        GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
    if self.vns is not None:
        self.vns.unbind()
        GL.glDisableClientState(GL.GL_NORMAL_ARRAY)
    GL.glDisable(GL.GL_LIGHTING)
    GL.glDisable(GL.GL_BLEND)
    GL.glDisable(GL.GL_TEXTURE_2D)
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glUseProgram( 0 )
def drawCameras(win, locationName, attrs, interface, picked):
    """Create or update the GL camera layer from a render-graph attrs dict.

    Expected attrs keys: 'camera_ids', 'mats' (required); optional
    'camera_names', 'imgs' + 'vheight'/'vwidth', 'distortion',
    'updateMats', 'updateImage', 'colour', 'colours'.

    NOTE(review): relies on a module-level `cameraMap` dict (cid -> name)
    not defined in this function — confirm it exists at module scope.
    """
    from UI.QGLViewer import Camera
    from UI import GLCameras
    # Get camera ids. That's all we need if we are just updating
    if 'camera_ids' not in attrs:
        print 'Error rendering cameras: No camera_ids found.'
        return
    camera_ids = attrs['camera_ids']
    if camera_ids is None: return
    # Create cameras if we don't have any at this point
    # Get all the gobbin from the cameras
    if 'mats' not in attrs: return
    mats = attrs['mats']
    if mats is None: return
    if 'camera_names' in attrs:
        camera_names = attrs['camera_names']
    else:
        camera_names = [str(n) for n in range(len(camera_ids))]
    # Check if we've got some image data
    imageData = False
    imgs = None
    if 'imgs' in attrs:
        imgs = attrs['imgs']
        if imgs is not None:
            vheights, vwidths = None, None
            if 'vheight' in attrs: vheights = attrs['vheight']
            if 'vwidth' in attrs: vwidths = attrs['vwidth']
            if vheights is not None and vwidths is not None:
                if len(imgs) == len(vheights) == len(vwidths):
                    imageData = True
                else:
                    renderLogger.error('Image data not consistent: #imgs[%d] #vheights[%d] #vwidths[%d]' % (len(imgs), len(vheights), len(vwidths)))
            else:
                renderLogger.error('vheights and vwidths for images not found')
    layerExists = win.view().hasLayer(locationName)
    if layerExists:
        camsLayer = win.view().getLayer(locationName)
        if 'colours' in attrs and attrs['colours'] is not None:
            camsLayer.colours = attrs['colours']
        else:
            camsLayer.colours = None
    # TODO : Sort out when images don't exist, so vheights and vwidths don't either
    updateMats = 'updateMats' in attrs and attrs['updateMats']
    # Fast path: layer already exists and only the images need refreshing.
    if imgs is not None and layerExists and not updateMats:
        if 'updateImage' in attrs and attrs['updateImage']:
            for ci, img, h, w in zip(camera_ids, imgs, vheights, vwidths):
                #cam = QApp.view().cameras[ci + 1]
                cam = QApp.view().cameras[ci + 1]
                cam.setImageData(img, h, w, 3)
        return
    distOverride = {}
    if 'distortion' in attrs: distOverride = attrs['distortion']
    # Go through what we've gathered and create the cameras (with image data if present)
    for ci, (mat, cid, cname) in enumerate(zip(mats, camera_ids, camera_names)):
        P, distortion = mat[2], mat[3]
        if ci in distOverride: distortion = distOverride[ci]
        if cid in cameraMap and updateMats:
            # Known camera: just refresh its projection.
            camera = QApp.view().cameras[ci + 1]
            camera.setP(P, distortion=distortion, store=True)
        elif cid not in cameraMap:
            # New camera: create, optionally attach image data, register.
            cameraName = "%s | %s" % (cname, cid)
            camera = Camera(cameraName)
            camera.setP(P, distortion=distortion, store=True)
            if imageData and ci < len(imgs):
                img, vheight, vwidth = imgs[ci], vheights[ci], vwidths[ci]
                camera.setImageData(img, vheight, vwidth, 3)
            win.view().addCamera(camera)
            cameraMap[cid] = cname
    cams = GLCameras(camera_ids, mats)
    if 'colour' in attrs and attrs['colour'] is not None:
        cams.colour = attrs['colour']
    if 'colours' in attrs and attrs['colours'] is not None:
        cams.colours = attrs['colours']
    win.setLayer(locationName, cams)
def track_view_cb(fi, attrs):  # g_mode = 0
    """Live-tracking view callback: fetch a frame from the webcam or movie,
    detect/track the face (with a 10-frame "settle" period after each boot
    and an optional HMC boot shape), stabilise it and retarget to sliders.

    :return: (head_pan_tilt_roll, raw_shape, norm_shape, img, slider_names,
              slider_values, A)
    """
    global g_webcam, g_md, g_rbfn, g_predictor # runtime options and state
    global g_prev_smooth_shape, g_prev_vs, g_hmc_boot, g_settle, g_head_pan_tilt_roll
    if attrs['using_webcam']:
        # Lazily open and configure the webcam on first use.
        if g_webcam is None:
            g_webcam = WebCam()
            g_webcam.Open(State.getKey('/root/ui/attrs/cam_offset') + State.getKey('/root/ui/attrs/webcam_index'))
            g_webcam.SetProperty('FPS', State.getKey('/root/ui/attrs/cam_fps'))
            g_webcam.SetProperty('FRAME_WIDTH', State.getKey('/root/ui/attrs/cam_width'))
            g_webcam.SetProperty('FRAME_HEIGHT', State.getKey('/root/ui/attrs/cam_height'))
        if g_webcam is None:
            img = np.zeros((16,16,3),dtype=np.uint8)  # placeholder frame
        else:
            img = g_webcam.GetFrame()
            if img is None: img = np.zeros((16,16,3),dtype=np.uint8)
    elif g_md is not None:
        MovieReader.readFrame(g_md, seekFrame=fi) # only update the visible camera
        img = np.frombuffer(g_md['vbuffer'], dtype=np.uint8).reshape(g_md['vheight'],g_md['vwidth'],3)
        #QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
    else:
        img = np.zeros((16,16,3),dtype=np.uint8)
    mirror_scale = -1 if attrs['mirroring'] else 1
    rotate = attrs['rotate']
    if g_settle >= 0:
        # Settle period after a (re)boot; capture the HMC boot shape at its end.
        if g_settle == 0 and g_prev_vs is not None:
            g_hmc_boot = g_prev_vs.copy()
        g_settle = g_settle - 1
    else:
        if attrs['HMC_mode'] and g_hmc_boot is not None:
            g_prev_vs = g_hmc_boot.copy()
        if attrs['booting'] or Face.test_reboot(img, g_prev_vs):
            g_prev_vs = Face.detect_face(img, g_predictor, 2, rotate)
            g_hmc_boot = None # in case we didn't detect a face
            g_settle = 10 # go into settle mode (10 frames)
            if g_prev_vs is not None:
                State.setKey('/root/ui/attrs/booting',False)
                if attrs['HMC_mode']: g_hmc_boot = g_prev_vs.copy()
    g_prev_vs = Face.track_face(img, g_predictor, g_prev_vs, rotate=rotate)
    # compensate for roll, translation and scale
    norm_shape, head_pan, head_tilt, A = stabilize_shape(g_prev_vs, setting_neutral=attrs['setting_neutral'])
    # dejitter
    if attrs['filtering']:
        g_prev_smooth_shape = filter_data(norm_shape, g_prev_smooth_shape)
    else:
        g_prev_smooth_shape = norm_shape.copy()
    # extract angles from the measured values
    head_pan_tilt_roll = np.degrees(np.arctan2([head_pan*mirror_scale, head_tilt, -mirror_scale*A[1][0]],[2,2,A[1][1]]))
    g_head_pan_tilt_roll = filter_data(head_pan_tilt_roll, g_head_pan_tilt_roll, 3.0)
    # Roll the viewing camera to follow the head (or the forced rotation).
    camera = QApp.view().camera
    camera.lockedUpright = False
    camera.cameraRoll = (-90*rotate if rotate != -1 else g_head_pan_tilt_roll[2])
    ret = g_prev_smooth_shape.copy()
    if attrs['mirroring']:
        # Landmark index permutation that mirrors the face left<->right.
        flip_order = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0, 26,25,24,23,22,21,20,19,18,17, 27,28,29,30, 35,34,33,32,31, \
            45,44,43,42, 47,46, 39,38,37,36, 41,40, 54,53,52,51,50,49,48, 59,58,57,56,55, 64,63,62,61,60, 67,66,65, 69,68]
        ret = ret[flip_order]
    slider_names, slider_values = applyRetarget(g_rbfn, ret)
    #State._setKey('/root/sliders/attrs', dict(zip(slider_names, slider_values))) # NO UNDO
    slider_names.extend(['NeckPan','NeckTilt','NeckRoll'])
    slider_values = np.float32(list(slider_values)+list(g_head_pan_tilt_roll))
    return g_head_pan_tilt_roll.copy(),g_prev_vs.copy(),norm_shape,img,slider_names,slider_values,A
def set_selected_vertex(vi):
    """Record vertex *vi* as the current markup selection in the state and
    highlight it on the markup mesh layer."""
    State.setKey('/markup_mesh_sel', vi)
    QApp.view().getLayer('markup_mesh').selectedIndex = vi