import numpy as np
import pyglet
import ratcave as rc


def view_mesh(body, obj_filename):
    """Displays mesh in .obj file. Useful for checking that files are rendering properly."""
    reader = rc.WavefrontReader(obj_filename)
    mesh = reader.get_mesh(body, position=(0, 0, -1))
    print(mesh.vertices.shape)
    mesh.scale.x = .2 / np.ptp(mesh.vertices, axis=0).max()

    camera = rc.Camera(projection=rc.PerspectiveProjection(fov_y=20))
    light = rc.Light(position=(camera.position.xyz))
    scene = rc.Scene(meshes=[mesh], camera=camera, light=light, bgColor=(.2, .4, .2))
    scene.gl_states = scene.gl_states[:-1]

    display = pyglet.window.get_platform().get_default_display()
    screen = display.get_screens()[0]
    window = pyglet.window.Window(fullscreen=True, screen=screen)

    fbo = rc.FBO(rc.Texture(width=4096, height=4096))
    quad = rc.gen_fullscreen_quad()
    quad.texture = fbo.texture
    label = pyglet.text.Label()

    @window.event
    def on_draw():
        with rc.resources.genShader, fbo:
            scene.draw()
        with rc.resources.deferredShader:
            quad.draw()
        verts_size = np.ptp(mesh.vertices, axis=0)  # extent of the mesh along each axis
        label.text = 'Name: {}\nRotation: {}\nSize: {} x {} x {}'.format(
            mesh.name, mesh.rotation, verts_size[0], verts_size[1], verts_size[2])
        label.draw()

    @window.event
    def on_resize(width, height):
        camera.projection.aspect = float(width) / height

    @window.event
    def on_mouse_motion(x, y, dx, dy):
        # map mouse position across the window to a full rotation of the mesh
        x, y = x / float(window.width) - .5, y / float(window.height) - .5
        mesh.rotation.x = -360 * y
        mesh.rotation.y = 360 * x

    pyglet.app.run()
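# Example call (illustrative; not part of the original file): inspect one of the primitive
# meshes bundled with ratcave using the view_mesh() helper defined above.
view_mesh('Monkey', rc.resources.obj_primitives)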
def main():
    FULLSCREEN = True

    reader = rc.WavefrontReader('resources/maze.obj')
    arena = reader.get_mesh('Cube')
    arena.textures.append(rc.Texture.from_image(rc.resources.img_uvgrid))
    sphere = reader.get_mesh('Sphere')  # , position=(0, 0, -1))
    cylinder = reader.get_mesh('Cylinder')  # , position=(0, 0, -1))

    player = rc.Camera(projection=rc.PerspectiveProjection(z_near=.001, z_far=4.5),
                       position=(0, .3, 0))
    player.rotation.axes = 'sxyz'

    scene = rc.Scene(meshes=[arena, sphere, cylinder], camera=player, bgColor=(1., 0., 0.))
    # scene.gl_states = scene.gl_states[:-1]

    window = FPSGame(player=player, scene=scene, fullscreen=FULLSCREEN)
    pyglet.app.run()
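# FPSGame is defined elsewhere in this project and is not shown here.  Purely as a
# hypothetical stand-in (not the project's actual class), a minimal first-person window
# could subclass pyglet.window.Window, keep the player camera and scene, and draw the
# scene each frame:
import pyglet
import ratcave as rc


class MinimalFPSWindow(pyglet.window.Window):
    def __init__(self, player, scene, **kwargs):
        super(MinimalFPSWindow, self).__init__(**kwargs)
        self.player = player  # rc.Camera controlled by the player
        self.scene = scene    # rc.Scene to render

    def on_draw(self):
        self.clear()
        with rc.default_shader:
            self.scene.draw()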
obj_reader = rc.WavefrontReader(obj_filename2)

# Create Mesh
thor = obj_reader.get_mesh("thor")

# Set the initial position and scale
thor.position.xyz = 0, 0, -2
scale = .1
thor.scale = scale
thor.uniforms['diffuse'] = [.5, .0, .8]

# Create the scene
scene = rc.Scene(meshes=[thor])
scene.bgColor = 0.4, 0.2, 0.4
scene.light.position = 0, 3, -1

# Create the camera
camera = rc.Camera(projection=rc.PerspectiveProjection(fov_y=90, aspect=1.))
scene.camera = camera

# Set up the projected scene
projected_scene = rc.Scene(meshes=[thor], bgColor=(1., 1., 1.))
projected_scene.light.position = scene.light.position
projected_scene.camera = rc.Camera(position=(0, 0, 5), rotation=(0, 0, 0))
projected_scene.camera.projection.z_far = 50


# Update the camera and object movement
def move_camera(dt):
    global scale
    global projected_scene
    camera_speed = 1
    if keys[key.W]:
        thor.scale.x += 0.1
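# The remainder of this script is not shown.  For the key checks above to work, pyglet's
# KeyStateHandler would typically be attached to a window and move_camera scheduled each
# frame -- a rough sketch under that assumption ('window' is hypothetical here):
import pyglet
from pyglet.window import key

keys = key.KeyStateHandler()
window.push_handlers(keys)           # 'window' assumed to be a pyglet.window.Window created elsewhere
pyglet.clock.schedule(move_camera)   # call move_camera(dt) every frame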
window = pyglet.window.Window(resizable=True)

# Assemble the Virtual Scene
obj_reader = rc.WavefrontReader(rc.resources.obj_primitives)
sphere = obj_reader.get_mesh("Sphere", position=(0, 0, 2), scale=0.2)
sphere.uniforms['diffuse'] = 1, 0, 0

cube = obj_reader.get_mesh("Cube", position=(0, 0, 0), scale=0.2)
cube.uniforms['diffuse'] = 1, 1, 0

# virtual_scene = rc.Scene(meshes=[sphere, cube], bgColor=(0., 0., 1.))
virtual_scene = rc.Scene(meshes=[cube, sphere], bgColor=(0., 0., 1.))
virtual_scene.light.position.xyz = 0, 3, -1

cube_camera = rc.Camera(projection=rc.PerspectiveProjection(fov_y=90, aspect=1.))
virtual_scene.camera = cube_camera

# Assemble the Projected Scene
monkey = obj_reader.get_mesh("Monkey", position=(0, 0, -1), scale=0.8)
screen = obj_reader.get_mesh("Plane", position=(0, 0, 1), rotation=(1.5, 180, 0))

projected_scene = rc.Scene(meshes=[monkey, screen, sphere, cube], bgColor=(1., .5, 1.))
projected_scene.light.position = virtual_scene.light.position
projected_scene.camera = rc.Camera(position=(0, 4, 0), rotation=(-90, 0, 0))
projected_scene.camera.projection.z_far = 6

# Create Framebuffer and Textures
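# The snippet ends at the step announced above.  Elsewhere in this document the same
# cube-mapping pattern appears, so the continuation presumably looks roughly like the
# sketch below (an assumption, not the original code): render the virtual scene into a
# cube texture through an FBO, put that texture on the "screen" plane, then draw the
# projected scene normally.
cube_texture = rc.TextureCube(width=1024, height=1024)
framebuffer = rc.FBO(texture=cube_texture)
screen.textures.append(cube_texture)


@window.event
def on_draw():
    with rc.default_shader:
        with framebuffer:
            virtual_scene.draw360_to_texture(cube_texture)
        projected_scene.draw()


pyglet.app.run()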
def main():
    # Get positions of rigid bodies in real time
    client = NatClient()
    arena_rb = client.rigid_bodies['Arena']
    rat_rb = client.rigid_bodies['Rat']

    window = pyglet.window.Window(resizable=True, fullscreen=True, screen=get_screen(1))  # open the basic pyglet window

    # Load Arena
    remove_image_lines_from_mtl('assets/3D/grass_scene.mtl')
    arena_filename = 'assets/3D/grass_scene.obj'  # an arena that was scanned, cleaned up in Blender, and exported to .obj; its normals are not flipped
    arena_reader = rc.WavefrontReader(arena_filename)  # load the arena mesh through a WavefrontReader
    arena = arena_reader.get_mesh("Arena", position=arena_rb.position)  # turn the Wavefront data into a Mesh so we can put a texture on top of it
    arena.uniforms['diffuse'] = 1., 1., 1.  # add a white diffuse material to the arena
    arena.rotation = arena.rotation.to_quaternion()  # we also need the arena's rotation, not just xyz, so it can be tracked and moved if it gets bumped

    # Load the projector as a ratcave camera, set light to its position
    projector = rc.Camera.from_pickle('assets/3D/projector.pkl')  # the projector's pickle file gives us the coordinates of where the projector is
    projector.position.x += .004
    projector.projection = rc.PerspectiveProjection(fov_y=40.5, aspect=1.777777778)
    light = rc.Light(position=projector.position)

    ## Make Virtual Scene ##
    fields = []
    for x, z in itertools.product([-.8, 0, .8], [-1.6, 0, 1.6]):
        field = load_textured_mesh(arena_reader, 'grass', 'grass.png')
        field.position.x += x
        field.position.z += z
        fields.append(field)

    ground = load_textured_mesh(arena_reader, 'Ground', 'dirt.png')
    sky = load_textured_mesh(arena_reader, 'Sky', 'sky.png')
    snake = load_textured_mesh(arena_reader, 'Snake', 'snake.png')

    rat_camera = rc.Camera(projection=rc.PerspectiveProjection(aspect=1, fov_y=90, z_near=.001, z_far=10),
                           position=rat_rb.position)  # put the camera on top of the rat's head

    meshes = [ground, sky, snake] + fields
    for mesh in meshes:
        mesh.uniforms['diffuse'] = 1., 1., 1.
        mesh.uniforms['flat_shading'] = False
        mesh.parent = arena

    virtual_scene = rc.Scene(meshes=meshes, light=light, camera=rat_camera,
                             bgColor=(0, 0, 255))  # the virtual scene that gets projected onto the arena mesh
    virtual_scene.gl_states.states = virtual_scene.gl_states.states[:-1]

    ## Make Cubemapping work on arena
    cube_texture = rc.TextureCube(width=4096, height=4096)  # use cube mapping to put the rendered image onto the arena's texture
    framebuffer = rc.FBO(texture=cube_texture)  # a framebuffer that renders into the cube texture (in tutorial 4 this was the blue screen)
    arena.textures.append(cube_texture)

    # Stereo
    vr_camgroup = rc.StereoCameraGroup(distance=.05)
    vr_camgroup.rotation = vr_camgroup.rotation.to_quaternion()

    # Update the arena's position in xyz and its rotation
    def update(dt):
        """main update function: put any movement or tracking steps in here, because it will be run constantly!"""
        vr_camgroup.position, vr_camgroup.rotation.xyzw = rat_rb.position, rat_rb.quaternion  # set the camera group to the rat's tracked position and orientation
        arena.uniforms['playerPos'] = rat_rb.position
        arena.position, arena.rotation.xyzw = arena_rb.position, arena_rb.quaternion
        arena.position.y -= .02
    pyglet.clock.schedule(update)  # so the app updates in real time

    @window.event
    def on_draw():
        ## Render virtual scene onto cube texture
        with framebuffer:
            with cube_shader:
                for mask, camside in zip([(True, False, False, True), (False, True, True, True)],
                                         [vr_camgroup.left, vr_camgroup.right]):
                    gl.glColorMask(*mask)
                    virtual_scene.camera.position.xyz = camside.position_global
                    virtual_scene.draw360_to_texture(cube_texture)

        ## Render real scene onto screen
        gl.glColorMask(True, True, True, True)
        window.clear()
        with cube_shader:  # the cube shader renders the six-sided cube map, updated with the position and angle of the camera/viewer
            rc.clear_color(255, 0, 0)
            with projector, light:
                arena.draw()

    # actually run everything.
    pyglet.app.run()
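# Note on the color masks in on_draw() above (explanatory only, not part of the original
# script): both eye views are drawn into the same frame with glColorMask, the left eye
# restricted to the red channel and the right eye to green and blue, which appears to be a
# red/cyan anaglyph split.
ANAGLYPH_MASKS = {'left': (True, False, False, True),   # R--A
                  'right': (False, True, True, True)}   # -GBA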
def debug_view(vertices, texcoord, image=None, window_size=(800, 600)):
    # creates the window and sets its properties
    width, height = window_size
    window = pyglet.window.Window(width=width, height=height, caption='Debug Viewer', resizable=False)

    num_verts = 3 * vertices.shape[0]
    model = ratcave.Mesh(arrays=(vertices.reshape(num_verts, 3), texcoord.reshape(num_verts, 2)))
    model.position.xyz = 0, 0, -10

    if image is not None:
        image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        imgdata = pyglet.image.ImageData(image.width, image.height, 'RGBA', image.tobytes())
        mipmap = False
        tex = imgdata.get_mipmapped_texture() if mipmap else imgdata.get_texture()
        pyglet.gl.glBindTexture(pyglet.gl.GL_TEXTURE_2D, 0)
        model.textures.append(ratcave.Texture(id=tex.id, data=tex, mipmap=mipmap))

    scene = ratcave.Scene(meshes=[model])
    scene.camera.projection = ratcave.PerspectiveProjection(60.0, width / float(height), z_far=100.0)

    def update(dt):
        pass
    pyglet.clock.schedule(update)

    shader = ratcave.Shader(vert=vert_shader, frag=frag_shader)

    @window.event
    def on_resize(width, height):
        # TODO update scene.camera.projection.viewport
        scene.camera.projection.aspect = width / float(height)
        return pyglet.event.EVENT_HANDLED

    @window.event
    def on_draw():
        with shader:
            scene.draw()

    @window.event
    def on_mouse_scroll(x, y, scroll_x, scroll_y):
        # scroll the MOUSE WHEEL to zoom
        scene.camera.position.z -= scroll_y / 10.0

    @window.event
    def on_mouse_drag(x, y, dx, dy, button, modifiers):
        # press the LEFT MOUSE BUTTON to rotate
        if button == pyglet.window.mouse.LEFT:
            model.rotation.y += dx / 5.0
            model.rotation.x -= dy / 5.0

        # press the LEFT and RIGHT MOUSE BUTTONS simultaneously to pan
        if button == pyglet.window.mouse.LEFT | pyglet.window.mouse.RIGHT:
            scene.camera.position.x -= dx / 100.0
            scene.camera.position.y -= dy / 100.0

    # starts the application
    pyglet.app.run()
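# Example call (illustrative, not from the original module): view two textured triangles
# forming a quad with a solid test image.  Assumes the vert_shader/frag_shader strings used
# by debug_view() are defined in this module.
import numpy as np
import PIL.Image

verts = np.array([[[-1, -1, 0], [1, -1, 0], [1, 1, 0]],
                  [[-1, -1, 0], [1, 1, 0], [-1, 1, 0]]], dtype='f4')  # 2 triangles x 3 verts x xyz
uvs = np.array([[[0, 0], [1, 0], [1, 1]],
                [[0, 0], [1, 1], [0, 1]]], dtype='f4')                # matching texture coordinates
test_image = PIL.Image.new('RGBA', (64, 64), (255, 128, 0, 255))
debug_view(verts, uvs, image=test_image)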
def get_cubecamera(z_near=.004, z_far=1.5):
    """Returns a ratcave.Camera instance with fov_y=90 and aspect=1. Useful for dynamic cubemapping."""
    camera = rc.Camera(projection=rc.PerspectiveProjection(fov_y=90., aspect=1., z_near=z_near, z_far=z_far))
    camera.rotation = camera.rotation.to_quaternion()
    return camera
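# Example usage (illustrative, not from the original module): a camera from get_cubecamera()
# is meant to be the camera of a scene rendered into a cube map, e.g. together with an
# rc.TextureCube-backed FBO and Scene.draw360_to_texture() as shown elsewhere in this
# document.  'virtual_scene' is assumed to exist.
virtual_scene.camera = get_cubecamera(z_near=.004, z_far=1.5)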
arena_filename = 'calibration_assets/arena2.obj'  # an arena that was scanned, cleaned up in Blender, and exported to .obj; its normals are not flipped
arena_reader = rc.WavefrontReader(arena_filename)  # load the arena mesh through a WavefrontReader
arena = arena_reader.get_mesh("Arena", position=arena_rb.position)  # turn the Wavefront data into a Mesh so we can put a texture on top of it
arena.uniforms['diffuse'] = 1., 1., 1.  # add a white diffuse material to the arena
arena.rotation = arena.rotation.to_quaternion()  # we also need the arena's rotation, not just xyz, so it can be tracked and moved if it gets bumped

# Load the projector as a ratcave camera, set light to its position
projector = rc.Camera.from_pickle('calibration_assets/projector.pkl')  # the projector's pickle file gives us the coordinates of where the projector is
projector.projection = rc.PerspectiveProjection(fov_y=41.5, aspect=1.777777778)
light = rc.Light(position=projector.position)

## Make Virtual Scene ##
virtual_arena = arena_reader.get_mesh('Arena')
wall = arena_reader.get_mesh("Plane")
wall.parent = virtual_arena
rat_camera = rc.Camera(projection=rc.PerspectiveProjection(aspect=1, fov_y=90, z_near=.001),
                       position=rat_rb.position)  # put the camera on top of the rat's head
virtual_scene = rc.Scene(meshes=[wall, virtual_arena],
                         light=light,
                         camera=rat_camera,