def trackrotation(motive_filename, body):
    motive.initialize()
    motive_filename = motive_filename.encode()
    motive.load_project(motive_filename)

    body = motive.get_rigid_bodies()[body]
    for el in range(3):
        body.reset_orientation()
        motive.update()

    window = RotationWindow()
    window.mesh.position.z = -1.5

    def update_body(dt, window, body):
        motive.update()
        window.mesh.rotation.xyzw = body.rotation_quats
        fmt_str = "loc: {:.1f}, {:.1f}, {:.1f}\t rot: {:.2f}, {:.2f}, {:.2f}, {:.2f}"
        window.label.text = fmt_str.format(*(body.location + body.rotation_quats))
    pyglet.clock.schedule(update_body, window, body)

    pyglet.app.run()
def latency_body_gen(motive_filename, projector_filename, port, screen):
    with serial.Serial(port=port, timeout=.5) as device:
        device.write('A')

        motive.initialize()
        motive.load_project(motive_filename.encode())
        motive.update()

        # Load projector's Camera object, created from the calib_projector ratcave_utils CLI tool.
        projector = rc.Camera.from_pickle(projector_filename.encode())

        display = pyglet.window.get_platform().get_default_display()
        screen = display.get_screens()[screen]
        window = LatencyDisplayApp(projector=projector, serial_device=device, fullscreen=True, screen=screen)

        def update_body(dt, window, body):
            motive.update()
            window.dot.position.xyz = body.location
        pyglet.clock.schedule(update_body, window, body)

        pyglet.app.run()
def write_video(cam, writer, record_time, save_video=True):
    """
    If save_video=True (the default), shows the video and writes it to file until record_time
    ends or the Escape or q key is pressed. If save_video=False, only shows the video,
    independent of record_time.
    """
    start_time = time.time()
    last_time = start_time
    while True:
        k = cv2.waitKey(1)
        fps = round(1. / (time.time() - last_time + .00001))
        last_time = time.time()
        m.update()
        frame = cam.get_frame_buffer()  # TODO: look at just having the video object
        cv2.imshow('Live Video. Framerate={0}Hz. (Esc or q to exit)'.format(cam.frame_rate), frame)
        print fps  # TODO: write video at higher speed than rendering it

        if save_video:
            writer.write(frame)  # TODO: Adapt writing speed to wanted video speed
            if time.time() > start_time + record_time:
                break  # breaks the while loop such that cv2.imshow disappears

        if k in {27, ord('q')}:  # Hit Escape key (depending on OS, escape code might not be 27) or q to exit
            break

    if save_video:
        print "Wrote video to file"
    else:
        print "Did not write video to file"
def correct_orientation_motivepy(rb, n_attempts=3):
    """Reset the orientation to account for between-session arena shifts."""
    import motive
    for attempt in range(n_attempts):
        rb.reset_orientation()
        motive.update()
    additional_rotation = rotate_to_var(np.array(rb.point_cloud_markers))
    return additional_rotation
def correct_orientation_motivepy(rb, n_attempts=3):
    """Reset the orientation to account for between-session arena shifts."""
    import motive
    for attempt in range(n_attempts):
        rb.reset_orientation()
        motive.update()
    additional_rotation = orienting.rotate_to_var(np.array(rb.point_cloud_markers))
    return additional_rotation
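# Minimal usage sketch for correct_orientation_motivepy() (assumptions: a Motive project with an
# 'Arena' rigid body is already loaded, and 'arena' is a graphics object with a world transform,
# as in the display() function further below).
rb = motive.get_rigid_bodies()['Arena']
additional_rotation = correct_orientation_motivepy(rb)
arena.world.rotation[1] += additional_rotation  # apply the correction about the vertical (Y) axis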
def trackbody(motive_filename, body):
    motive_filename = motive_filename.encode()
    motive.load_project(motive_filename)
    body = motive.get_rigid_bodies()[body]
    while True:
        motive.update()
        click.echo(body.location)
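# Hypothetical CLI wiring sketch for trackbody() (assumption: these utilities are exposed as click
# commands, which the click.echo() calls suggest; the decorator arguments here are illustrative only).
import click

@click.command()
@click.argument('motive_filename')
@click.argument('body')
def trackbody_cli(motive_filename, body):
    trackbody(motive_filename, body)

if __name__ == '__main__':
    trackbody_cli()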
def detect_projection_point(self, dt):
    """Use Motive to detect the projected mesh in 3D space."""
    motive.flush_camera_queues()
    for el in range(2):
        motive.update()

    markers = motive.get_unident_markers()
    markers = [marker for marker in markers if 0.08 < marker[1] < 0.50]

    click.echo("{} markers detected.".format(len(markers)))
    self.marker_pos.extend(markers)
def detect_projection_point(self, dt):
    """Use Motive to detect the projected mesh in 3D space."""
    motive.flush_camera_queues()
    for el in range(2):
        motive.update()

    markers = motive.get_unident_markers()
    markers = [marker for marker in markers if 0.08 < marker[1] < 0.50]
    if len(markers) == 1:
        click.echo(markers)
        self.screen_pos.append([self.mesh.x, self.mesh.y])
        self.marker_pos.append(markers[0])
def ray_scan(window):
    circle = window.active_scene.meshes[0]
    circle.visible = True

    # Do some non-random points so the human can change height range.
    pointPos, screenPos = [], []
    for pos in [(0, 0), (-.5, 0), (.5, 0)]:
        circle.x, circle.y = pos
        window.draw()
        window.flip()
        old_time = motive.frame_time_stamp()
        for _ in timers.countdown_timer(5, stop_iteration=True):
            motive.update()
            markers = motive.get_unident_markers()
            # Collect a point only if a single marker is visible and at least .3 units of
            # frame time have passed since the last collection.
            if motive.frame_time_stamp() > old_time + .3 and len(markers) == 1:
                if markers[0][1] > 0.1:
                    screenPos.append(circle.position[:2])
                    pointPos.append(markers[0])
                    old_time = motive.frame_time_stamp()
    return screenPos, pointPos
def scan(pointwidth=.06):
    """Project a series of points onto the arena, collect their 3d position, and save them
    and the associated rigid body data into a pickled file."""

    # Initialize Calibration Point Grid.
    wavefront_reader = rc.WavefrontReader(rc.resources.obj_primitives)
    mesh = wavefront_reader.get_mesh('Grid', scale=1.5, drawstyle='point', point_size=12, position=(0, 0, -1))
    # mesh.material.diffuse.rgb = 1, 1, 1
    scene = rc.Scene([mesh], bgColor=(0, 0, 0))
    scene.camera.ortho_mode = True
    window = visual.Window(screen=1, fullscr=True)

    # Main Loop
    old_frame, clock, points = motive.frame_time_stamp(), utils.timers.countdown_timer(3.), []
    for theta in np.linspace(0, 2 * np.pi, 40)[:-1]:

        # Update Screen
        scene.camera.position = (pointwidth * np.sin(theta)), (pointwidth * np.cos(theta)), -1
        scene.draw()
        window.flip()

        # Collect New Tracker Data
        old_frame = motive.frame_time_stamp()
        while motive.frame_time_stamp() == old_frame:
            motive.flush_camera_queues()
            motive.update()

        # Collect 3D points from Tracker
        markers = motive.get_unident_markers()
        if markers:
            points.extend(markers)

    # Housekeeping
    window.close()

    # Data quality checks and return.
    return np.array(points)
def write_video(cam, writer, record_time, save_video=True):
    """
    If save_video=True (the default), shows the video and writes it to file until record_time
    ends or the Escape or q key is pressed. If save_video=False, only shows the video,
    independent of record_time.
    """
    start_time = time.time()
    last_time = start_time
    while True:
        k = cv2.waitKey(1)
        fps = round(1. / (time.time() - last_time + .00001))
        last_time = time.time()
        m.update()
        frame = cam.get_frame_buffer()  # TODO: look at just having the video object
        cv2.imshow('Live Video. Framerate={0}Hz. (Esc or q to exit)'.format(cam.frame_rate), frame)
        print(fps)  # TODO: write video at higher speed than rendering it

        if save_video:
            writer.write(frame)  # TODO: Adapt writing speed to wanted video speed
            if time.time() > start_time + record_time:
                break  # breaks the while loop such that cv2.imshow disappears

        if k in {27, ord('q')}:  # Hit Escape key (depending on OS, escape code might not be 27) or q to exit
            break

    if save_video:
        print("Wrote video to file")
    else:
        print("Did not write video to file")
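# Hypothetical setup sketch for write_video() (assumptions: 'm' is the motive module with a project
# already loaded, and OpenCV 3+ provides cv2.VideoWriter_fourcc; if the camera frames are
# single-channel, cv2.VideoWriter may additionally need isColor=False).
import cv2
import motive as m

m.update()
cam = m.get_cams()[0]             # first Motive camera
frame = cam.get_frame_buffer()    # grab one frame to learn its dimensions
writer = cv2.VideoWriter('out.avi', cv2.VideoWriter_fourcc(*'XVID'),
                         cam.frame_rate, (frame.shape[1], frame.shape[0]))
write_video(cam, writer, record_time=10.)
writer.release()
cv2.destroyAllWindows()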
def random_scan(window, scene, n_points=300):
    circle = scene.root.children[0]
    screenPos, pointPos = [], []
    collect_fmt, missed_fmt, missed_cnt = ", Points Collected: ", ", Points Missed: ", 0
    pbar = pb.ProgressBar(widgets=[pb.Bar(), pb.ETA(), collect_fmt + '0', missed_fmt + '0'], maxval=n_points)
    pbar.start()
    while len(pointPos) < n_points and 'escape' not in event.getKeys():

        # Update position of circle, and draw.
        circle.visible = True
        homogenous_pos = np.random.random(2) - .5
        circle.x, circle.y = homogenous_pos * [1.8, 1]
        slow_draw(window, scene)
        motive.update()

        # Try to isolate a single point.
        for _ in timers.countdown_timer(.2, stop_iteration=True):
            motive.flush_camera_queues()
            motive.update()
            markers = motive.get_unident_markers()
            if markers and markers[0][1] > 0.:
                screenPos.append(circle.position[:2])
                pointPos.append(markers[0])

                # Update Progress Bar
                pbar.widgets[2] = collect_fmt + str(len(pointPos))
                pbar.update(len(pointPos))
                break
        else:
            # Update Progress bar
            missed_cnt += 1
            pbar.widgets[3] = missed_fmt + str(missed_cnt)
            pbar.update(len(pointPos))

        # Hide circle, and wait again for a new update.
        circle.visible = False
        slow_draw(window, scene)
        motive.update()
        motive.flush_camera_queues()
        while len(motive.get_unident_markers()) > 0:
            motive.update()
            motive.flush_camera_queues()

    return np.array(screenPos), np.array(pointPos)
def view_arenafit(motive_filename, projector_filename, arena_filename, screen):
    # """Displays mesh in .obj file. Useful for checking that files are rendering properly."""

    reader = rc.WavefrontReader(arena_filename)
    arena = reader.get_mesh('Arena', mean_center=True)
    arena.rotation = arena.rotation.to_quaternion()
    print('Arena Loaded. Position: {}, Rotation: {}'.format(arena.position, arena.rotation))

    camera = rc.Camera.from_pickle(projector_filename)
    camera.projection.fov_y = 39
    light = rc.Light(position=(camera.position.xyz))

    root = rc.EmptyEntity()
    root.add_child(arena)

    sphere = rc.WavefrontReader(rc.resources.obj_primitives).get_mesh('Sphere', scale=.05)
    root.add_child(sphere)

    scene = rc.Scene(meshes=root, camera=camera, light=light, bgColor=(.2, .4, .2))
    scene.gl_states = scene.gl_states[:-1]

    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[screen]
    window = pyglet.window.Window(fullscreen=True, screen=screen)

    label = pyglet.text.Label()
    fps_display = FPSDisplay(window)

    shader = rc.Shader.from_file(*rc.resources.genShader)

    @window.event
    def on_draw():
        with shader:
            scene.draw()
        # label.draw()
        # window.clear()
        fps_display.draw()

    @window.event
    def on_resize(width, height):
        camera.projection.aspect = float(width) / height

    @window.event
    def on_key_release(sym, mod):
        if sym == key.UP:
            scene.camera.projection.fov_y += .5
        elif sym == key.DOWN:
            scene.camera.projection.fov_y -= .5

    import motive
    motive.initialize()
    motive.load_project(motive_filename.encode())
    motive.update()

    rb = motive.get_rigid_bodies()['Arena']
    # for el in range(3):
    #     rb.reset_orientation()

    def update_arena_position(dt):
        motive.update()
        arena.position.xyz = rb.location
        arena.rotation.xyzw = rb.rotation_quats
        arena.update()
        sphere.position.xyz = rb.location
        label.text = "aspect={}, fov_y={}, ({:2f}, {:2f}, {:2f}), ({:2f}, {:2f}, {:2f})".format(
            scene.camera.projection.aspect, scene.camera.projection.fov_y,
            *(arena.position.xyz + rb.location))
    pyglet.clock.schedule(update_arena_position)

    pyglet.app.run()
def vr_demo(motive_filename, projector_filename, arena_filename, body, screen):
    motive.initialize()
    motive_filename = motive_filename.encode()
    motive.load_project(motive_filename)

    body = motive.get_rigid_bodies()[body]
    for el in range(3):
        body.reset_orientation()
        motive.update()

    arena_body = motive.get_rigid_bodies()['Arena']

    # Load projector's Camera object, created from the calib_projector ratcave_utils CLI tool.
    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[screen]
    window = pyglet.window.Window(fullscreen=False, screen=screen, vsync=False)

    fbo = rc.FBO(rc.TextureCube(width=4096, height=4096))
    shader3d = rc.Shader.from_file(*rc.resources.genShader)

    reader = rc.WavefrontReader(rc.resources.obj_primitives)
    mesh = reader.get_mesh('Monkey', scale=.022, position=(0, 0, .3))
    mesh.position.y = .6
    mesh.uniforms['ambient'] = .15, .15, .15
    # mesh.rotation = mesh.rotation.to_quaternion()

    arena = rc.WavefrontReader(arena_filename.encode()).get_mesh('Arena')
    arena.rotation = arena.rotation.to_quaternion()
    arena.texture = fbo.texture

    floor = reader.get_mesh('Plane', scale=50, position=(0, 0, 0), mean_center=True)
    # floor.scale.x = 20
    floor.rotation.x = -90
    floor.texture = fbo.texture
    # floor.rotation.x = 180

    vr_scene = rc.Scene(meshes=[mesh], bgColor=(0., .3, 0))
    vr_scene.camera.projection.fov_y = 90
    vr_scene.camera.projection.aspect = 1.
    vr_scene.camera.projection.z_near = .005
    vr_scene.camera.projection.z_far = 2.

    fbo_aa = rc.FBO(rc.Texture(width=4096, height=4096, mipmap=True))
    quad = rc.gen_fullscreen_quad()
    quad.texture = fbo_aa.texture
    shader_deferred = rc.Shader.from_file(*rc.resources.deferredShader)

    scene = rc.Scene(meshes=[arena, floor], bgColor=(0., 0., .3))
    camera = rc.Camera.from_pickle(projector_filename.encode())
    camera.projection.fov_y = 39
    scene.camera = camera
    scene.light.position.xyz = camera.position.xyz
    vr_scene.light.position.xyz = camera.position.xyz

    @window.event
    def on_draw():
        with shader3d:
            with fbo:
                vr_scene.camera.projection.match_aspect_to_viewport()
                vr_scene.draw360_to_texture(fbo.texture)
            scene.camera.projection.match_aspect_to_viewport()
            with fbo_aa:
                scene.draw()
        with shader_deferred:
            quad.draw()

    def update_body(dt, body):
        motive.update()
        mesh.position.xyz = arena_body.location
        mesh.position.y -= .07
        # mesh.rotation.xyzw = arena_body.rotation_quats
        # mesh.rotation.y += 10 * dt
        arena.position.xyz = arena_body.location
        arena.rotation.xyzw = arena_body.rotation_quats
        vr_scene.camera.position.xyz = body.location
        scene.camera.uniforms['playerPos'] = body.location
    pyglet.clock.schedule(update_body, body)

    pyglet.app.run()
def display(optitrack_ip="127.0.0.1", calib_object_name=''):

    # Connect to Optitrack
    # tracker = ratcave.devices.Optitrack(client_ip=optitrack_ip)

    # Create Arena and cube
    reader = WavefrontReader(ratcave.graphics.resources.obj_arena)
    arena = reader.get_mesh('Arena', lighting=True, centered=False)
    arena.load_texture(ratcave.graphics.resources.img_colorgrid)
    meshes = [arena]

    if calib_object_name:
        reader = WavefrontReader(ratcave.graphics.resources.obj_primitives)
        cube = reader.get_mesh('Sphere', lighting=True, scale=.02, centered=True)
        meshes.append(cube)

    # Create Scene and Window
    scene = Scene(meshes)
    scene.camera = projector
    scene.camera.fov_y = 27.8
    scene.light.position = scene.camera.position
    window = Window(scene, screen=1, fullscr=True)

    # Update Everything's Position
    # arena.world.position = tracker.rigid_bodies['Arena'].position
    # arena.world.rotation = tracker.rigid_bodies['Arena'].rotation_pca_y
    # print(motive.get_rigid_bodies())
    arena_rb = motive.get_rigid_bodies()['Arena']
    arena.world.position = arena_rb.location
    arena.world.rotation = arena_rb.rotation
    motive.update()

    for attempt in range(3):
        arena_rb.reset_orientation()
        motive.update()
    markers = np.array(arena_rb.point_cloud_markers)
    additional_rotation = rotate_to_var(markers)

    # Print the Following every time a key is detected:
    print "Camera settings:\n -shift: {0}, {1}\n -position: {2}\n -fov_y(xz): {3}\n -rotation: {4}\n\n".format(scene.camera.x_shift, scene.camera.y_shift, scene.camera.position, scene.camera.fov_y, scene.camera.rotation)
    print "Arena settings:\n -local\n\tPosition: {}\n\tRotation: {}\n -world\n\tPosition: {}\n\tRotation: {}".format(arena.local.position, arena.local.rotation, arena.world.position, arena.world.rotation)

    aa = 0
    while True:

        # Update Everything's Position
        motive.update()
        arena.world.position = arena_rb.location
        arena.world.rotation = arena_rb.rotation_global
        arena.world.rotation[1] += additional_rotation
        arena.world.rotation[1] += aa

        # If there's another object to track, then track it.
        if calib_object_name:
            cube.local.position = motive.get_rigid_bodies()[calib_object_name].location

        # Re-Draw Everything
        window.draw()
        window.flip()

        keys = event.getKeys()
        if 'escape' in keys:
            window.close()
            break
        elif 'up' in keys:
            scene.camera.fov_y += .1
            print('fov_y: {}'.format(scene.camera.fov_y))
        elif 'down' in keys:
            scene.camera.fov_y -= .1
            print('fov_y: {}'.format(scene.camera.fov_y))
        elif 'space' in keys:
            aa += 180
parser.add_argument('-r', action='store', dest='rigid_body_name', default='',
                    help='Name of the Arena rigid body. If only one rigid body is present, unnecessary--that one will be used automatically.')
parser.add_argument('-i', action='store', dest='motive_projectfile', default=motive.utils.backup_project_filename,
                    help='Name of the motive project file to load. If not used, will load most recent Project file loaded in MotivePy.')
args = parser.parse_args()

# Select Rigid Body to track.
motive.load_project(args.motive_projectfile)
print("Loaded Motive Project: {}".format(args.motive_projectfile))
hardware.motive_camera_vislight_configure()
print("Camera Settings changed to Detect Visible light:")
print("\t" + "\n\t".join(['{}: FPS={}, Gain={}, Exp.={}, Thresh.={}'.format(cam.name, cam.frame_rate, cam.image_gain, cam.exposure, cam.threshold) for cam in motive.get_cams()]))
motive.update()

rigid_bodies = motive.get_rigid_bodies()
try:
    if not args.rigid_body_name:
        assert len(rigid_bodies) == 1, "Only one rigid body should be present for auto-selection. Please use the -r flag to specify a rigid body name to track for the arena."
    arena_name = args.rigid_body_name if args.rigid_body_name in rigid_bodies else list(rigid_bodies.keys())[0]
except IndexError:
    raise IndexError("No Rigid Bodies found in Optitrack tracker.")
except KeyError:
    raise KeyError("Rigid Body '{}' not found in list of Optitrack Rigid Bodies.".format(arena_name))

print('Arena Name: {}. N Markers: {}'.format(arena_name, len(rigid_bodies[arena_name].markers)))
assert len(rigid_bodies[arena_name].markers) > 5, "At least 6 markers in the arena's rigid body are required"

# TODO: Fix bug that requires scanning be done in original orientation (doesn't affect later recreation, luckily.)
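# The argument-parsing snippet above assumes a parser constructed earlier in the script; a
# hypothetical sketch of that setup (the description text is illustrative only):
import argparse
import motive

parser = argparse.ArgumentParser(description='Scan the arena and track its rigid body with MotivePy.')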
def trackposition(motive_filename, projector_filename, body, screen):
    motive.initialize()
    motive_filename = motive_filename.encode()
    motive.load_project(motive_filename)

    body = motive.get_rigid_bodies()[body]
    for el in range(3):
        body.reset_orientation()
        motive.update()

    # Load projector's Camera object, created from the calib_projector ratcave_utils CLI tool.
    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[screen]
    window = RotationWindow(fullscreen=True, screen=screen)

    keys = key.KeyStateHandler()
    window.push_handlers(keys)

    camera = rc.Camera.from_pickle(projector_filename.encode())
    # camera.projection.fov_y = 38.5
    window.scene.camera = camera
    window.scene.light.position.xyz = camera.position.xyz

    shader = rc.Shader.from_file(*rc.resources.genShader)

    @window.event
    def on_draw():
        with shader:
            window.scene.draw()
        window.label.draw()

    def update_body(dt, window, body):
        motive.update()
        window.mesh.rotation.xyzw = body.rotation_quats
        window.mesh.position.xyz = body.location
        # window.scene.camera.rotation.x += 15 * dt
        fmt_str = "loc: {:.1f}, {:.1f}, {:.1f}\t rot: {:.2f}, {:.2f}, {:.2f}, {:.2f}\nfov_y: {fov_y:.2f}\taspect: {aspect:.2f}\nfps: {fps:.1f}"
        window.label.text = fmt_str.format(*(body.location + body.rotation_quats),
                                           fov_y=window.scene.camera.projection.fov_y,
                                           aspect=window.scene.camera.projection.aspect,
                                           fps=1. / (dt + .00000001))

    def update_fov(dt):
        camera.projection.aspect = window.width / float(window.height)
        camera.projection.update()

        speed = 10.  # How fast to change values on keyboard hold.
        if keys[key.UP]:
            camera.projection.fov_y += speed * dt
            camera.projection.update()
        if keys[key.DOWN]:
            camera.projection.fov_y -= speed * dt
            camera.projection.update()
        if keys[key.LEFT]:
            camera.projection.aspect += speed * dt
            camera.projection.update()
        if keys[key.RIGHT]:
            camera.projection.aspect -= speed * dt
            camera.projection.update()
    pyglet.clock.schedule(update_fov)

    pyglet.clock.schedule(update_body, window, body)
    pyglet.app.run()
def scan_arena(motive_filename, output_filename, body, nomeancenter, nopca, nsides):
    """Runs Arena Scanning algorithm."""

    output_filename = output_filename + '.obj' if not path.splitext(output_filename)[1] else output_filename
    assert path.splitext(output_filename)[1] == '.obj', "Output arena filename must be a Wavefront (.obj) file"

    # Load Motive Project File
    motive_filename = motive_filename.encode()
    motive.load_project(motive_filename)
    hardware.motive_camera_vislight_configure()
    motive.update()

    # Get Arena's Rigid Body
    rigid_bodies = motive.get_rigid_bodies()
    assert body in rigid_bodies, "RigidBody {} not found in project file. Available body names: {}".format(body, list(rigid_bodies.keys()))
    assert len(rigid_bodies[body].markers) > 5, "At least 6 markers in the arena's rigid body are required. Only {} found".format(len(rigid_bodies[body].markers))

    # TODO: Fix bug that requires scanning be done in original orientation (doesn't affect later recreation, luckily.)
    for attempt in range(3):  # Sometimes it doesn't work on the first try, for some reason.
        rigid_bodies[body].reset_orientation()
        motive.update()
        if sum(np.abs(rigid_bodies[body].rotation)) < 1.:
            break
    else:
        raise ValueError("Rigid Body Orientation not Resetting to 0,0,0 after 3 attempts. This happens sometimes (bug), please just run the script again.")

    # Scan points
    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[1]
    window = GridScanWindow(screen=screen, fullscreen=True)
    pyglet.app.run()
    points = np.array(window.marker_pos)
    assert len(points) > 100, "Only {} points detected. Tracker is not detecting enough points to model. Is the projector turned on?".format(len(points))
    assert points.ndim == 2

    # Rotate all points to be mean-centered and aligned to the Optitrack markers' direction of largest variance.
    markers = np.array(rigid_bodies[body].point_cloud_markers)
    if not nomeancenter:
        points -= np.mean(markers, axis=0)
    if not nopca:
        points = np.dot(points, rotation_matrix(np.radians(orienting.rotate_to_var(markers)), [0, 1, 0])[:3, :3])

    # Plot preview of data collected
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*points.T)
    plt.show()

    # Get vertex positions and normal directions from the collected data.
    vertices, normals = pointcloud.meshify(points, n_surfaces=nsides)
    vertices = {wall: pointcloud.fan_triangulate(pointcloud.reorder_vertices(verts)) for wall, verts in vertices.items()}  # Triangulate

    # Write wavefront .obj file to app data directory and user-specified directory for importing into Blender.
    wave_str = pointcloud.to_wavefront(body, vertices, normals)
    with open(output_filename, 'wb') as wavfile:
        wavfile.write(wave_str)

    # Show resulting plot with points and model in same place.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*points[::12, :].T)
    for idx, verts in vertices.items():
        ax.plot(*np.vstack((verts, verts[0, :])).T)
    plt.show()
def scan_arena(motive_filename, output_filename, body, nomeancenter, nsides, screen):
    """Runs Arena Scanning algorithm."""

    output_filename = output_filename + '.obj' if not path.splitext(output_filename)[1] else output_filename
    assert path.splitext(output_filename)[1] == '.obj', "Output arena filename must be a Wavefront (.obj) file"

    # Load Motive Project File
    motive_filename = motive_filename.encode()
    motive.initialize()
    motive.load_project(motive_filename)

    # Get old camera settings before changing them, to go back to them before saving later.
    cam_settings = [cam.settings for cam in motive.get_cams()]
    frame_rate_old = motive.get_cams()[0].frame_rate
    hardware.motive_camera_vislight_configure()
    motive.update()

    # Get Arena's Rigid Body
    rigid_bodies = motive.get_rigid_bodies()
    assert body in rigid_bodies, "RigidBody {} not found in project file. Available body names: {}".format(body, list(rigid_bodies.keys()))
    # assert len(rigid_bodies[body].markers) > 5, "At least 6 markers in the arena's rigid body is required. Only {} found".format(len(rigid_bodies[body].markers))

    for el in range(3):
        rigid_bodies[body].reset_orientation()
        rigid_bodies[body].reset_pivot_offset()
        motive.update()
    assert np.isclose(np.array(rigid_bodies[body].rotation), 0).all(), "Orientation didn't reset."
    assert np.isclose(np.array(rigid_bodies[body].location), np.mean(rigid_bodies[body].point_cloud_markers, axis=0)).all(), "Pivot didn't reset."
    print("Location and Orientation Successfully reset.")

    # Scan points
    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[screen]
    window = GridScanWindow(screen=screen, fullscreen=True)
    pyglet.app.run()
    points = np.array(window.marker_pos)
    assert len(points) > 100, "Only {} points detected. Tracker is not detecting enough points to model. Is the projector turned on?".format(len(points))
    assert points.ndim == 2

    # Plot preview of data collected
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*points.T)
    plt.show()

    # Get vertex positions and normal directions from the collected data.
    vertices, normals = pointcloud.meshify_arena(points, n_surfaces=nsides)
    vertices, face_indices = pointcloud.face_index(vertices)
    face_indices = pointcloud.fan_triangulate(face_indices)

    # Reapply old camera settings, then save.
    for setting, cam in zip(cam_settings, motive.get_cams()):
        cam.settings = setting
        cam.image_gain = 1
        cam.frame_rate = frame_rate_old
        if 'Prime 13' in cam.name:
            cam.set_filter_switch(True)
    motive.update()

    if not nomeancenter:
        # import ipdb
        # ipdb.set_trace()
        vertmean = np.mean(vertices[face_indices.flatten(), :], axis=0)
        # vertmean = np.array([np.mean(np.unique(verts)) for verts in vertices.T])  # to avoid counting the same vertices twice.
        vertices -= vertmean
        points -= vertmean
        print('Old Location: {}'.format(rigid_bodies[body].location))
        arena = rigid_bodies[body]
        for attempt in range(300):
            print('Trying to Set New Rigid Body location, attempt {}...'.format(attempt))
            arena.reset_pivot_offset()
            arena.location = vertmean
            if np.isclose(arena.location, vertmean, rtol=.001).all():
                break
        else:
            raise ValueError('Motive failed to properly shift pivot to center of mesh')
        print('Vertex Mean: {}'.format(vertmean))
        print('New Location: {}'.format(arena.location))

    # Write wavefront .obj file to app data directory and user-specified directory for importing into Blender.
    writer = WavefrontWriter.from_indexed_arrays(body, vertices, normals, face_indices)
    with open(output_filename, 'w') as f:
        writer.dump(output_filename)

    # Show resulting plot with points and model in same place.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*points[::12, :].T)
    ax.scatter(*vertices.T, c='r')
    plt.show()

    # motive.save_project(motive_filename)
    motive.save_project(path.splitext(motive_filename)[0] + '_scanned.ttp')
def show_viewer():
    """Creates a widget showing a live video plotting the 3D position of unidentified markers
    (light blue) and rigid body markers, with a grey grid denoting the floor."""

    # Create Qt Window
    app = QtGui.QApplication([])  # create the graphing application
    w = gl.GLViewWidget()  # create widget
    w.opts['distance'] = 4  # start distance from where one looks at the plot
    w.setFixedSize(1100, 800)

    # Initialize/Set unidentified_markers plot with dummy data.
    unident_markers = gl.GLScatterPlotItem(pos=np.array([[0, 0, 0]]), color=(204/255, 1, 1, 0.8), size=6)
    w.addItem(unident_markers)

    # Initialize the Rigid Body Scatterplot and assign plot color to the rigid bodies
    m.update()
    rigs = m.get_rigid_bodies()
    color_dict = {'Red': (1., 0., 0.), 'Green': (0., 1., 0.), 'Yellow': (1., 1., 0.), 'Mag.': (1., 0., 1.), 'Orange': (1., .4, 0.)}
    for rig, color_name in zip(rigs.values(), itertools.cycle(color_dict)):
        rig.color_name = color_name
        w.addItem(gl.GLScatterPlotItem(pos=np.array([[0, 0, 0]]), color=color_dict[color_name] + (1.,), size=8))

    # Make floor rectangle
    grid_points = np.linspace(-1, 1, 200)
    points_2d = np.array(list(itertools.product(grid_points, grid_points)))
    points_3d = np.insert(points_2d, 1, 0, axis=1)
    w.addItem(gl.GLScatterPlotItem(pos=points_3d, color=(0.5, 0.5, 0.5, 0.3), size=0.1))

    # Rotate Everything so Y axis is up when plotted.
    [item.rotate(90, 1, 0, 0) for item in w.items]

    # Show widget (for different background color see PyQtshowmarkers.py)
    w.show()

    # Main Draw Loop (as generator)
    def update_generator():
        last_time = time.time()
        rig_data = ', '.join(['{0}: {1}'.format(body.name, body.color_name) for body in rigs.values()])
        while True:
            m.update()

            # Update Title with new FPS
            try:
                fps = round(1. / (time.time() - last_time))
                last_time = time.time()
                w.setWindowTitle('MotivePy Viewer. Rigid Bodies = {{{rigid_bodies}}}. Update Rate: {fps} fps'.format(rigid_bodies=rig_data, fps=fps))
            except ZeroDivisionError:
                pass

            # Plot
            markers = m.get_unident_markers()
            if markers:
                unident_markers.setData(pos=np.array(m.get_unident_markers()))
            for rig, scat in zip(rigs.values(), w.items[1:-1]):
                scat.setData(pos=np.array(rig.point_cloud_markers))

            # Return Nothing
            yield

    t = QtCore.QTimer()
    t.timeout.connect(update_generator().next)
    t.start(2)

    # Start Viewer App
    QtGui.QApplication.instance().exec_()
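# Minimal usage sketch for show_viewer() (assumptions: 'm' is the motive module, imported at module
# level as the function above implies, and the project file path is illustrative only).
if __name__ == '__main__':
    m.initialize()
    m.load_project('my_project.ttp')  # hypothetical Motive project file
    show_viewer()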