Example #1
def blit_fbo(width, height, src_id, target_id, target_image=gl.GL_BACK):
    # For drawing a multisampled FBO to a non-multisampled FBO or to the
    # screen. See
    # https://www.khronos.org/opengl/wiki/Multisampling#Allocating_a_Multisample_Render_Target
    gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, src_id)
    gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, target_id)
    gl.glDrawBuffer(target_image)
    gl.glBlitFramebuffer(0, 0, width, height, 0, 0, width, height,
                         gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)
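The helper above assumes that src_id refers to a framebuffer backed by a multisampled attachment, as described in the Khronos wiki page linked in the comment. The following sketch shows one way such a source FBO could be allocated with pyglet's gl bindings before blit_fbo is used; the names make_multisample_fbo, msaa_fbo, color_rb and num_samples are illustrative and not part of the original code.

from ctypes import byref
from pyglet import gl

def make_multisample_fbo(width, height, num_samples=4):
    # Framebuffer backed by a multisampled color renderbuffer.
    msaa_fbo = gl.GLuint(0)
    color_rb = gl.GLuint(0)
    gl.glGenFramebuffers(1, byref(msaa_fbo))
    gl.glGenRenderbuffers(1, byref(color_rb))

    gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, color_rb)
    gl.glRenderbufferStorageMultisample(
        gl.GL_RENDERBUFFER, num_samples, gl.GL_RGBA8, width, height)

    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, msaa_fbo)
    gl.glFramebufferRenderbuffer(
        gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
        gl.GL_RENDERBUFFER, color_rb)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    return msaa_fbo

# After rendering into msaa_fbo, resolve it to the default framebuffer:
# blit_fbo(width, height, msaa_fbo, 0, target_image=gl.GL_BACK)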
Example #2
def blitFramebuffer(srcRect, dstRect=None, filter=GL.GL_LINEAR):
    """Copy a block of pixels between framebuffers via blitting. Read and draw
    framebuffers must be bound prior to calling this function. Beware, the
    scissor box and viewport are changed to match dstRect when this is called.

    Parameters
    ----------
    srcRect : :obj:`list` of :obj:`int`
        List specifying the top-left and bottom-right coordinates of the region
        to copy from (<X0>, <Y0>, <X1>, <Y1>).
    dstRect : :obj:`list` of :obj:`int` or :obj:`None`
        List specifying the top-left and bottom-right coordinates of the region
        to copy to (<X0>, <Y0>, <X1>, <Y1>). If None, srcRect is used for
        dstRect.
    filter : :obj:`int`
        Interpolation method to use if the image is stretched, default is
        GL_LINEAR, but can also be GL_NEAREST.

    Returns
    -------
    None

    Examples
    --------
    # bind framebuffer to read pixels from
    GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, srcFbo)

    # bind framebuffer to draw pixels to
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, dstFbo)

    gltools.blitFramebuffer((0,0,800,600), (0,0,800,600))

    # unbind both read and draw buffers
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)

    """
    # in most cases srcRect and dstRect will be the same.
    if dstRect is None:
        dstRect = srcRect

    # GL.glViewport(*dstRect)
    # GL.glEnable(GL.GL_SCISSOR_TEST)
    # GL.glScissor(*dstRect)
    GL.glBlitFramebuffer(
        srcRect[0],
        srcRect[1],
        srcRect[2],
        srcRect[3],
        dstRect[0],
        dstRect[1],
        dstRect[2],
        dstRect[3],
        GL.GL_COLOR_BUFFER_BIT,  # colors only for now
        filter)
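Because glBlitFramebuffer stretches the source rectangle onto the destination rectangle, reversing the Y extents of dstRect flips the copy vertically, which is a common way to compensate for OpenGL's bottom-left origin. A minimal sketch reusing the 800x600 size from the docstring example (the size is illustrative):

# Flip vertically while blitting: the Y extents of dstRect are reversed.
blitFramebuffer((0, 0, 800, 600), (0, 600, 800, 0), filter=GL.GL_NEAREST)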
Example #3
  def _render_img(self, width, height, multi_fbo, final_fbo, img_array, top_down=True):
    """
    Render an image of the environment into a frame buffer
    Produce a numpy RGB array image as output
    """

    if not self.graphics:
      return

    # Switch to the default context
    # This is necessary on Linux nvidia drivers
    # pyglet.gl._shadow_window.switch_to()
    self.shadow_window.switch_to()

    from pyglet import gl
    # Bind the multisampled frame buffer
    gl.glEnable(gl.GL_MULTISAMPLE)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, multi_fbo)
    gl.glViewport(0, 0, width, height)

    # Clear the color and depth buffers

    c0, c1, c2 = self.horizon_color
    gl.glClearColor(c0, c1, c2, 1.0)
    gl.glClearDepth(1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)

    # Set the projection matrix
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.gluPerspective(
      self.cam_fov_y,
      width / float(height),
      0.04,
      100.0
    )

    # Set modelview matrix
    # Note: we add a bit of noise to the camera position for data augmentation
    pos = self.cur_pos
    angle = self.cur_angle
    if self.domain_rand:
      pos = pos + self.randomization_settings['camera_noise']

    x, y, z = pos + self.cam_offset
    dx, dy, dz = self.get_dir_vec(angle)
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()

    if self.draw_bbox:
      y += 0.8
      gl.glRotatef(90, 1, 0, 0)
    elif not top_down:
      y += self.cam_height
      gl.glRotatef(self.cam_angle[0], 1, 0, 0)
      gl.glRotatef(self.cam_angle[1], 0, 1, 0)
      gl.glRotatef(self.cam_angle[2], 0, 0, 1)
      gl.glTranslatef(0, 0, self._perturb(gym_duckietown.simulator.CAMERA_FORWARD_DIST))

    if top_down:
      gl.gluLookAt(
        # Eye position
        (self.grid_width * self.road_tile_size) / 2,
        self.top_cam_height,
        (self.grid_height * self.road_tile_size) / 2,
        # Target
        (self.grid_width * self.road_tile_size) / 2,
        0,
        (self.grid_height * self.road_tile_size) / 2,
        # Up vector
        0, 0, -1.0
      )
    else:
      gl.gluLookAt(
        # Eye position
        x,
        y,
        z,
        # Target
        x + dx,
        y + dy,
        z + dz,
        # Up vector
        0, 1.0, 0.0
      )

    # Draw the ground quad
    gl.glDisable(gl.GL_TEXTURE_2D)
    gl.glColor3f(*self.ground_color)
    gl.glPushMatrix()
    gl.glScalef(50, 1, 50)
    self.ground_vlist.draw(gl.GL_QUADS)
    gl.glPopMatrix()

    # Draw the ground/noise triangles
    self.tri_vlist.draw(gl.GL_TRIANGLES)

    # Draw the road quads
    gl.glEnable(gl.GL_TEXTURE_2D)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)

    # For each grid tile
    for j in range(self.grid_height):
      for i in range(self.grid_width):
        # Get the tile type and angle
        tile = self._get_tile(i, j)

        if tile is None:
          continue

        # kind = tile['kind']
        angle = tile['angle']
        color = tile['color']
        texture = tile['texture']

        gl.glColor3f(*color)

        gl.glPushMatrix()
        gl.glTranslatef((i + 0.5) * self.road_tile_size, 0, (j + 0.5) * self.road_tile_size)
        gl.glRotatef(angle * 90, 0, 1, 0)

        # Bind the appropriate texture
        texture.bind()

        self.road_vlist.draw(gl.GL_QUADS)
        gl.glPopMatrix()

        if self.draw_curve and tile['drivable']:
          # Find curve with largest dotproduct with heading
          curves = self._get_tile(i, j)['curves']
          curve_headings = curves[:, -1, :] - curves[:, 0, :]
          curve_headings = curve_headings / np.linalg.norm(curve_headings, axis=1, keepdims=True)
          dirVec = get_dir_vec(angle)
          dot_prods = np.dot(curve_headings, dirVec)

          # Current ("closest") curve drawn in Red
          pts = curves[np.argmax(dot_prods)]
          bezier_draw(pts, n=20, red=True)

          pts = self._get_curve(i, j)
          for idx, pt in enumerate(pts):
            # Don't draw current curve in blue
            if idx == np.argmax(dot_prods):
                continue
            bezier_draw(pt, n=20)

    # For each object
    for idx, obj in enumerate(self.objects):
      obj.render(self.draw_bbox)

    # Draw the agent's own bounding box
    if self.draw_bbox:
      corners = get_agent_corners(pos, angle)
      gl.glColor3f(1, 0, 0)
      gl.glBegin(gl.GL_LINE_LOOP)
      gl.glVertex3f(corners[0, 0], 0.01, corners[0, 1])
      gl.glVertex3f(corners[1, 0], 0.01, corners[1, 1])
      gl.glVertex3f(corners[2, 0], 0.01, corners[2, 1])
      gl.glVertex3f(corners[3, 0], 0.01, corners[3, 1])
      gl.glEnd()

    if top_down:
      gl.glPushMatrix()
      gl.glTranslatef(*self.cur_pos)
      gl.glScalef(1, 1, 1)
      gl.glRotatef(self.cur_angle * 180 / np.pi, 0, 1, 0)
      # glColor3f(*self.color)
      self.mesh.render()
      gl.glPopMatrix()

    # Resolve the multisampled frame buffer into the final frame buffer
    gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, multi_fbo)
    gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, final_fbo)
    gl.glBlitFramebuffer(
      0, 0,
      width, height,
      0, 0,
      width, height,
      gl.GL_COLOR_BUFFER_BIT,
      gl.GL_LINEAR
    )

    # Copy the frame buffer contents into a numpy array
    # Note: glReadPixels reads starting from the lower left corner
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, final_fbo)
    gl.glReadPixels(
      0,
      0,
      width,
      height,
      gl.GL_RGB,
      gl.GL_UNSIGNED_BYTE,
      img_array.ctypes.data_as(POINTER(gl.GLubyte))
    )

    # Unbind the frame buffer
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    # Flip the image because OpenGL maps (0,0) to the lower-left corner
    # Note: this is necessary for gym.wrappers.Monitor to record videos
    # properly, otherwise they are vertically inverted.
    img_array = np.ascontiguousarray(np.flip(img_array, axis=0))

    return img_array
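_render_img reads the resolved frame buffer directly into img_array through a ctypes pointer, so the caller must pass a preallocated uint8 array with exactly height rows, width columns and 3 channels to match GL_RGB / GL_UNSIGNED_BYTE, and POINTER must be imported from ctypes. A minimal sketch of that setup (the sizes are illustrative):

from ctypes import POINTER  # used by _render_img for the glReadPixels target
import numpy as np

camera_width, camera_height = 640, 480  # illustrative resolution
# One unsigned byte per channel; glReadPixels fills this buffer in place.
img_array = np.zeros(shape=(camera_height, camera_width, 3), dtype=np.uint8)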

  def render_obs(self):
    """
    Render an observation from the point of view of the agent
    """

    observation = self._render_img(
      self.camera_width,
      self.camera_height,
      self.multi_fbo,
      self.final_fbo,
      self.img_array,
      top_down=False
    )

    # self.undistort - for UndistortWrapper
    if self.distortion and not self.undistort:
      observation = self.camera_model.distort(observation)

    return observation
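As a usage sketch, assuming the two methods above live in gym-duckietown's Simulator class (the constructor arguments below are illustrative and depend on the installed version):

from gym_duckietown.simulator import Simulator

env = Simulator(map_name="loop_empty", domain_rand=False)
env.reset()

obs = env.render_obs()
print(obs.shape)  # (camera_height, camera_width, 3), RGB, uint8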