Example #1
    def _decode_video_packet(self, video_packet):
        # # Some timing and profiling
        # pr = cProfile.Profile()
        # pr.enable()
        # clock = pyglet.clock.get_default()
        # t0 = clock.time()

        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 4
        # https://ffmpeg.org/doxygen/3.3/group__lavc__decoding.html#ga8f5b632a03ce83ac8e025894b1fc307a
        nbytes = (pitch * height + FF_INPUT_BUFFER_PADDING_SIZE)
        buffer = (c_uint8 * nbytes)()
        try:
            result = self._ffmpeg_decode_video(video_packet.packet, buffer)
        except FFmpegException:
            image_data = None
        else:
            image_data = image.ImageData(width, height, 'RGBA', buffer, pitch)
            timestamp = ffmpeg_get_frame_ts(self._video_stream)
            timestamp = timestamp_from_ffmpeg(timestamp)
            video_packet.timestamp = timestamp - self.start_time

        video_packet.image = image_data

        if _debug:
            print('Decoding video packet at timestamp', video_packet.timestamp)
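
A note on the buffer math above: an RGBA frame occupies pitch * height bytes, where pitch = width * 4, and FFmpeg additionally wants FF_INPUT_BUFFER_PADDING_SIZE spare bytes at the end of any buffer it reads from (see the linked doxygen page). A minimal, self-contained sketch of the same layout, with a hypothetical frame size:

    from ctypes import c_uint8
    from pyglet import image

    width, height = 320, 240              # hypothetical frame size
    pitch = width * 4                     # 4 bytes per RGBA pixel
    buf = (c_uint8 * (pitch * height))()  # zero-filled, i.e. a black frame
    frame = image.ImageData(width, height, 'RGBA', buf, pitch)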
Example #2
    def _decode_video_packet(self, video_packet):
        # # Some timing and profiling
        # pr = cProfile.Profile()
        # pr.enable()
        # clock = pyglet.clock.get_default()
        # t0 = clock.time()

        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 4
        buffer = (c_uint8 * (pitch * height))()
        try:
            result = self._ffmpeg_decode_video(video_packet.packet,
                                               buffer)
        except FFmpegException:
            image_data = None
        else:
            image_data = image.ImageData(width, height, 'RGBA', buffer, pitch)
            timestamp = ffmpeg_get_frame_ts(self._video_stream)
            timestamp = timestamp_from_ffmpeg(timestamp)
            video_packet.timestamp = timestamp - self.start_time

        video_packet.image = image_data

        if _debug:
            print('Decoding video packet at timestamp', video_packet.timestamp)
Example #3
	def __init__(self, *args, **kwargs):
		# Take our own argument, then let the rest pass through to pyglet.
		self.simulator = kwargs.pop('simulator')
		pyglet.window.Window.__init__(self, *args, **kwargs)
		self.wallList = []
		for wall in self.simulator.scene.walls:
			numdot = wall.shape[0]
			pos = []
			clr = []
			for j in range(numdot):
				x, y = wall[j][0], wall[j][1]
				pos.extend((x, y))
				clr.extend((255, 255, 255))
			self.wallList.append(pyglet.graphics.vertex_list(
				numdot, ('v2i', tuple(pos)), ('c3B', tuple(clr))))
		self.curPosList = pyglet.graphics.vertex_list(0, ('v2i', ()), ('c3B', ()))
		self.curRealList = pyglet.graphics.vertex_list(0, ('v2i', ()), ('c3B', ()))
		self.depthList = pyglet.graphics.vertex_list(0, ('v2i', ()), ('c3B', ()))
		self.sceneImg = []
		self.buffLen = 6
		for i in range(self.buffLen):
			img = cv2.imread("D:/crowdData/" + self.simulator.name + "/Frames/%06d.jpg" % (i * 20))
			warpedImg = cv2.warpPerspective(img, M, (3 * warpscale, 3 * warpscale))
			# numpy's tostring() is deprecated; tobytes() returns the raw pixels.
			self.sceneImg.append(image.ImageData(3 * warpscale, 3 * warpscale, 'BGR', warpedImg.tobytes()))
		self.vhead = 0
		self.vtail = 3
Example #4
    def __init__(self):

        # A Batch is a collection of vertex lists for batched rendering.
        self.batch = pyglet.graphics.Batch()

        # A TextureGroup manages an OpenGL texture.
        group = MultiTextureGroup(gl.TEXTURE1,
                                  image.load(TEXTURE_PATH).get_texture())
        data = image.ImageData(1, 1, 'RGB', bytes([1, 2, 3]))
        self.group = MultiTextureGroup(gl.TEXTURE0, data.get_texture(), group)

        # A mapping from position to the texture of the block at that position.
        # This defines all the blocks that are currently in the world.
        self.world = {}

        # Same mapping as `world` but contains block orientations
        self.orientation = {}

        # Same mapping as `world` but contains core network ids
        self.line = {}

        # Mapping from position to a pyglet `VertexList` for all shown blocks.
        self._shown = {}

        self.network = core.Network()

        self._initialize()
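
The image.ImageData(1, 1, 'RGB', bytes([1, 2, 3])) line above builds a texture from three raw bytes. A one-pixel ImageData is a cheap way to get a solid-colour placeholder texture; a sketch with an explicit colour:

    from pyglet import image

    red = image.ImageData(1, 1, 'RGB', bytes((255, 0, 0)))  # one red pixel
    texture = red.get_texture()  # usable wherever a Texture is expected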
Example #5
    def set_image(self, image_buffer):
        """Set image buffer data

        Parameters
        ----------
        image_buffer : array
            N x M x 3 (or 4) array, or an N x M grayscale array (which is
            tiled to RGB). Can be of type ``np.float64`` or ``np.uint8``.
            If ``np.float64``, color values must range between 0 and 1;
            ``np.uint8`` is slightly more efficient.
        """
        from pyglet import image, sprite
        image_buffer = np.ascontiguousarray(image_buffer)
        if image_buffer.dtype not in (np.float64, np.uint8):
            raise TypeError('image_buffer must be np.float64 or np.uint8')
        if image_buffer.dtype == np.float64:
            if image_buffer.max() > 1 or image_buffer.min() < 0:
                raise ValueError('all float values must be between 0 and 1')
            image_buffer = (image_buffer * 255).astype('uint8')
        if image_buffer.ndim == 2:  # grayscale -> RGB
            image_buffer = np.tile(image_buffer[..., np.newaxis], (1, 1, 3))
        if image_buffer.ndim != 3 or image_buffer.shape[2] not in (3, 4):
            raise RuntimeError('image_buffer incorrect size: {}'
                               ''.format(image_buffer.shape))
        dims = image_buffer.shape
        fmt = 'RGB' if dims[2] == 3 else 'RGBA'
        # Negative pitch: the array is stored top-to-bottom, while pyglet
        # reads bottom-to-top by default; tobytes() replaces the removed
        # numpy tostring().
        self._sprite = sprite.Sprite(
            image.ImageData(dims[1], dims[0], fmt, image_buffer.tobytes(),
                            -dims[1] * dims[2]))
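
A possible call site for set_image (a sketch; `widget` is a stand-in for whatever object defines the method): float input in [0, 1] is converted to uint8 internally, and 2-D input is tiled to RGB.

    import numpy as np

    ramp = np.tile(np.linspace(0, 1, 64), (64, 1))  # 64 x 64 grayscale ramp
    widget.set_image(ramp)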
Example #6
def own_render(environment, img):
    if environment.window is None:
        config = gl.Config(double_buffer=False)
        environment.window = window.Window(width=WINDOW_WIDTH,
                                           height=WINDOW_HEIGHT,
                                           resizable=False,
                                           config=config)

    environment.window.clear()
    environment.window.switch_to()
    environment.window.dispatch_events()
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()
    gl.glOrtho(0, WINDOW_WIDTH, 0, WINDOW_HEIGHT, 0, 10)
    width = img.shape[1]
    height = img.shape[0]
    img = np.ascontiguousarray(np.flip(img, axis=0))
    img_data = image.ImageData(
        width,
        height,
        'RGB',
        img.ctypes.data_as(POINTER(gl.GLubyte)),
        pitch=width * 3,
    )
    img_data.blit(0, 0, 0, width=WINDOW_WIDTH, height=WINDOW_HEIGHT)
    x, y, z = environment.cur_pos
    environment.text_label.text = "pos: (%.2f, %.2f, %.2f), angle: %d, steps: %d, speed: %.2f m/s" % (
        x, y, z, int(environment.cur_angle * 180 / math.pi),
        environment.step_count, environment.speed)
    environment.text_label.draw()

    gl.glFlush()
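
The np.flip above exists because numpy stores row 0 at the top while pyglet blits row 0 at the bottom. An alternative sketch (assumed equivalent in effect) keeps the array as-is and declares the data top-to-bottom with a negative pitch:

    img = np.ascontiguousarray(img)
    img_data = image.ImageData(
        width,
        height,
        'RGB',
        img.ctypes.data_as(POINTER(gl.GLubyte)),
        pitch=-width * 3,  # negative pitch: rows run top-to-bottom
    )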
Example #7
    def on_draw():
        #w.clear()
        global parent
        global parentdiff
        global olddrawing, newdrawing
        global blitted
        global image_pixels
        global keeps
        global i

        if not blitted:
            """
            At the start we've not seen the target before,
            so draw it and store the pixel data.
            """
            pic.blit(0, 0)
            blitted = 1
            image_pixels = (gl.GLubyte * (4 * size))(0)
            gl.glReadPixels(0, 0, newdrawing.width, newdrawing.height,
                            gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, image_pixels)
            image_pixels = np.frombuffer(image_pixels,
                                         dtype=np.uint8).astype(np.int32)

        # Draw the new child
        newdrawing.draw()

        # Read the pixel data for the child and find out if it's any good
        gl.glBindFramebufferEXT(gl.GL_FRAMEBUFFER_EXT, newdrawing.fb)
        gl.glReadPixels(0, 0, newdrawing.width, newdrawing.height, gl.GL_RGBA,
                        gl.GL_UNSIGNED_BYTE, a)
        gl.glBindFramebufferEXT(gl.GL_FRAMEBUFFER_EXT, 0)
        diff = compute_diff(a)

        if parent is None or diff < parentdiff:
            # The new drawing is better.
            # Redraw the parent as this child.
            # Set this child's diff as the new one to beat.
            parent = image.ImageData(newdrawing.width, newdrawing.height,
                                     "RGBA", a)
            parentdiff = diff
            draw_parent(parent, newdrawing.width)
        else:
            # The new drawing sucks. Replace it,
            # but dump its framebuffer first!
            gl.glDeleteFramebuffersEXT(1, ctypes.byref(newdrawing.fb))
            newdrawing = olddrawing
        i += 1

        if i % 20 == 0:
            # Use the window title to let the user know how we're doing
            w.set_caption(
                str(fps.get_fps()) + " " + str(parentdiff) + " " +
                str(log(parentdiff, 10)) + " " + str(i))

        fps.tick()
Example #8
    def _get_pyglet_img(self, pimg=None, pitch_dir=1):
        """ Return a pyglet.image.ImageData converted from a passed PIL image,
        or an empty ImageData if none passed. pitch_dir is for compatibility
        between PIL / game and pyglet coordinate systems (flipped y-axis).
        """

        # Start from a zeroed buffer so that "no image passed" yields an
        # empty ImageData (assumes self._pitch == self._w * 4, i.e. bytes
        # per RGBA row).
        imagedata = image.ImageData(self._w, self._h, "RGBA",
                                    bytes(self._h * self._pitch))

        if pimg:
            imagedata.set_data("RGBA", pitch_dir * self._pitch, pimg.tobytes())
        return imagedata
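
The pitch_dir parameter leans on pyglet's row-order convention: a positive pitch means bottom-to-top rows, a negative pitch means top-to-bottom. A tiny sketch of the two orientations (the pixel values are arbitrary):

    from pyglet import image

    w, h = 4, 4
    data = bytes(range(w * h * 4))  # arbitrary RGBA bytes
    bottom_up = image.ImageData(w, h, 'RGBA', data, pitch=w * 4)
    top_down = image.ImageData(w, h, 'RGBA', data, pitch=-w * 4)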
Example #9
    def _create_glyph(self):
        # In FreeType a positive pitch means rows flow top-to-bottom; in
        # pyglet ImageData, top-to-bottom is indicated by a *negative*
        # pitch, so the pitch would have to be inverted. A negative pitch
        # forces a data conversion, so it is much faster to keep it
        # positive and swap the glyph's tex_coords instead.
        img = image.ImageData(self._width, self._height, 'A', self._data,
                              abs(self._pitch))
        glyph = self.font.create_glyph(img)
        glyph.set_bearings(self._baseline, self._lsb, self._advance_x)
        if self._pitch > 0:
            t = list(glyph.tex_coords)
            glyph.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]

        return glyph
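
What the tex_coords swap does, as a sketch: tex_coords is a flat 12-tuple of four (u, v, r) texture corners in bottom-left, bottom-right, top-right, top-left order, so reversing the corner order flips the glyph vertically without touching the pixel data.

    t = list(range(12))  # stand-in for glyph.tex_coords
    flipped = t[9:12] + t[6:9] + t[3:6] + t[:3]  # corners in reverse order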
Example #10
    def render(self, text):
        face = self.font.face
        FT_Set_Char_Size(face, 0, self.font._face_size, self.font._dpi,
                         self.font._dpi)
        glyph_index = fontconfig.FcFreeTypeCharIndex(byref(face), ord(text[0]))
        error = FT_Load_Glyph(face, glyph_index, FT_LOAD_RENDER)
        if error != 0:
            raise base.FontException('Could not load glyph for "%c"' % text[0],
                                     error)
        glyph_slot = face.glyph.contents
        width = glyph_slot.bitmap.width
        height = glyph_slot.bitmap.rows
        baseline = height - glyph_slot.bitmap_top
        lsb = glyph_slot.bitmap_left
        advance = int(f26p6_to_float(glyph_slot.advance.x))
        mode = glyph_slot.bitmap.pixel_mode
        pitch = glyph_slot.bitmap.pitch

        if mode == FT_PIXEL_MODE_MONO:
            # BCF fonts always render to 1 bit mono, regardless of render
            # flags. (freetype 2.3.5)
            bitmap_data = cast(glyph_slot.bitmap.buffer,
                               POINTER(c_ubyte * (pitch * height))).contents
            data = (c_ubyte * (pitch * 8 * height))()
            data_i = 0
            for byte in bitmap_data:
                # Data is MSB-first; the left-most pixel in a byte has
                # value 0x80. Expand each bit to a full 0/255 byte.
                for bit in range(8):
                    data[data_i + bit] = 255 if byte & (0x80 >> bit) else 0
                data_i += 8
            pitch <<= 3  # eight output bytes per input byte
        elif mode == FT_PIXEL_MODE_GRAY:
            # Usual case
            data = glyph_slot.bitmap.buffer
        else:
            raise base.FontException('Unsupported render mode for this glyph')

        # The pitch ought to be negative (the data is top-to-bottom), but
        # it is much faster to keep it positive and swap tex_coords instead.
        img = image.ImageData(width, height, 'A', data, pitch)
        glyph = self.font.create_glyph(img)
        glyph.set_bearings(baseline, lsb, advance)
        t = list(glyph.tex_coords)
        glyph.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]

        return glyph
Example #11
    def _decode_video_packet(self, packet):
        timestamp = timestamp_from_avbin(packet.timestamp)

        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 3
        buffer = (ctypes.c_uint8 * (pitch * height))()
        result = av.avbin_decode_video(self._video_stream, packet.data,
                                       packet.size, buffer)
        if result < 0:
            return None

        return BufferedImage(
            image.ImageData(width, height, 'RGB', buffer, pitch), timestamp)
Example #12
    def _decode_video_packet(self, packet):
        timestamp = packet.timestamp  # TODO: unused

        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 3
        buffer = (ctypes.c_uint8 * (pitch * height))()
        result = av.avbin_decode_video(self._video_stream, packet.data,
                                       packet.size, buffer)
        if result < 0:
            image_data = None
        else:
            image_data = image.ImageData(width, height, 'RGB', buffer, pitch)

        return BufferedImage(image_data, packet.id)
Example #13
	def updateScene(self):
		self.updatePos()
		self.updateDepth()
		# update the background image every 20 simulation steps
		if self.simulator.t % 20 == 0:
			img = cv2.imread("D:/crowdData/" + self.simulator.name + "/Frames/%06d.jpg" % (self.simulator.t + 60))
			warpedImg = cv2.warpPerspective(img, M, (3 * warpscale, 3 * warpscale))
			# tobytes() replaces the deprecated tostring().
			self.sceneImg[self.vtail] = image.ImageData(3 * warpscale, 3 * warpscale, 'BGR', warpedImg.tobytes())
			self.vtail = (self.vtail + 1) % self.buffLen
			self.vhead = (self.vhead + 1) % self.buffLen
		self.label = pyglet.text.Label('%d'%(self.simulator.t),
		                          font_name='Times New Roman',
		                          font_size=36,
		                          x=window.width//2, y=window.height//2,
		                          color=(255,255,255,255),
		                          anchor_x='center', anchor_y='center')
Example #14
File: avbin.py Project: pyzh/pyglet
    def _decode_video_packet(self, packet):
        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 3
        buffer = (ctypes.c_uint8 * (pitch * height))()
        result = av.avbin_decode_video(self._video_stream, packet.data,
                                       packet.size, buffer)
        if result < 0:
            image_data = None
        else:
            image_data = image.ImageData(width, height, 'RGB', buffer, pitch)

        packet.image = image_data

        # Notify get_next_video_frame() that another one is ready.
        with self._condition:
            self._condition.notify()
Example #15
    def _render(self, mode='human', close=False):
        if not self.render_mode:
            return
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        try:
            small_img = self.current_obs
            if small_img is None:
                small_img = np.zeros(shape=(SCREEN_X, SCREEN_Y, 3),
                                     dtype=np.uint8)

            vae_img = resize(self._decode(self.z), (64, 64))
            WINDOW_HEIGHT = 600
            if DEBUG:
                small_img = resize(small_img, (64, 64))
                if DEBUG_NEXT:
                    next_img = resize(self._decode(self.next_z), (64, 64))
                    img = np.concatenate((small_img, next_img), axis=1)
                else:
                    img = np.concatenate((small_img, vae_img), axis=1)
                WINDOW_WIDTH = 1200
            else:
                WINDOW_WIDTH = 800
                img = vae_img
            if mode == 'rgb_array':
                return img
            elif mode == 'human':
                from pyglet import gl, window, image
                if self.window is None:
                    config = gl.Config(double_buffer=False)
                    self.window = window.Window(width=WINDOW_WIDTH,
                                                height=WINDOW_HEIGHT,
                                                resizable=False,
                                                config=config)

                self.window.clear()
                self.window.switch_to()
                self.window.dispatch_events()
                gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
                gl.glMatrixMode(gl.GL_PROJECTION)
                gl.glLoadIdentity()
                gl.glMatrixMode(gl.GL_MODELVIEW)
                gl.glLoadIdentity()
                gl.glOrtho(0, WINDOW_WIDTH, 0, WINDOW_HEIGHT, 0, 10)
                width = img.shape[1]
                height = img.shape[0]
                img = np.ascontiguousarray(np.flip(img, axis=0))
                from ctypes import POINTER
                img_data = image.ImageData(
                    width,
                    height,
                    'RGB',
                    img.ctypes.data_as(POINTER(gl.GLubyte)),
                    pitch=width * 3,
                )
                img_data.blit(0,
                              0,
                              0,
                              width=WINDOW_WIDTH,
                              height=WINDOW_HEIGHT)
        except Exception as e:
            print(e)  # Duckietown has been closed
Example #16
    def get_next_video_frame(self, skip_empty_frame=True):
        video_data_length = DWORD()
        flags = DWORD()
        timestamp = ctypes.c_longlong()

        if self._current_video_sample:
            self._current_video_buffer.Release()
            self._current_video_sample.Release()

        self._current_video_sample = IMFSample()
        self._current_video_buffer = IMFMediaBuffer()

        while True:
            self._source_reader.ReadSample(
                self._video_stream_index, 0, None, ctypes.byref(flags),
                ctypes.byref(timestamp),
                ctypes.byref(self._current_video_sample))

            if flags.value & MF_SOURCE_READERF_CURRENTMEDIATYPECHANGED:
                assert _debug('WMFVideoDecoder: Data is no longer valid.')

                # Get Major media type (Audio, Video, etc)
                new = IMFMediaType()
                self._source_reader.GetCurrentMediaType(
                    self._video_stream_index, ctypes.byref(new))

                # Sometimes this happens once. I think this only
                # changes if the stride is added/changed before playback?
                stride = ctypes.c_uint32()
                new.GetUINT32(MF_MT_DEFAULT_STRIDE, ctypes.byref(stride))

                self._stride = stride.value

            if flags.value & MF_SOURCE_READERF_ENDOFSTREAM:
                self._timestamp = None
                assert _debug(
                    'WMFVideoDecoder: End of data from stream source.')
                break

            if not self._current_video_sample:
                assert _debug('WMFVideoDecoder: No sample.')
                continue

            self._current_video_buffer = IMFMediaBuffer()

            # Convert to single buffer as a sample could potentially have multiple buffers.
            self._current_video_sample.ConvertToContiguousBuffer(
                ctypes.byref(self._current_video_buffer))

            video_data = POINTER(BYTE)()

            self._current_video_buffer.Lock(ctypes.byref(video_data), None,
                                            ctypes.byref(video_data_length))

            width = self.video_format.width
            height = self.video_format.height

            # buffer = ctypes.create_string_buffer(size)
            self._timestamp = timestamp_from_wmf(timestamp.value)

            self._current_video_buffer.Unlock()

            # This is made with the assumption that the video frame will be blitted into the player texture immediately
            # after, and then cleared next frame attempt.
            return image.ImageData(width, height, 'RGBA', video_data,
                                   self._stride)

        return None
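
One hedged caveat about the snippet above: per the IMFMediaBuffer documentation, the pointer returned by Lock is only guaranteed valid until Unlock, yet the ImageData is built afterwards. A sketch of a safer variant that copies the bytes while the buffer is still locked:

    self._current_video_buffer.Lock(ctypes.byref(video_data), None,
                                    ctypes.byref(video_data_length))
    frame_bytes = ctypes.string_at(video_data, video_data_length.value)
    self._current_video_buffer.Unlock()
    return image.ImageData(width, height, 'RGBA', frame_bytes, self._stride)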
Example #17
    def update_texture(self):
        pixels = bytes(self.get_pixels())
        # width = len(pixels) // 3
        width = 1024
        self.group.texture = image.ImageData(width, 1, 'RGB',
                                             pixels).get_texture()
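
If update_texture runs every frame, building a brand-new texture on each call is wasteful. A sketch of re-uploading into the existing texture instead (assuming its size never changes):

    new_data = image.ImageData(width, 1, 'RGB', pixels)
    self.group.texture.blit_into(new_data, 0, 0, 0)  # reuse the GL texture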
Example #18
  def render(self, mode='human', close=False, text=False):
    """
    Render the environment for human viewing
    """

    if close:
      if self.window:
        self.window.close()
      return

    top_down = mode == 'top_down'
    # Render the image
    top = self._render_img(
      WINDOW_WIDTH,
      WINDOW_HEIGHT,
      self.multi_fbo_human,
      self.final_fbo_human,
      self.img_array_human,
      top_down=True
    )
    bot = self._render_img(
      WINDOW_WIDTH,
      WINDOW_HEIGHT,
      self.multi_fbo_human,
      self.final_fbo_human,
      self.img_array_human,
      top_down=False
    )

    win_width = WINDOW_WIDTH
    if self._view_mode == FULL_VIEW_MODE:
      img = np.concatenate((top, bot), axis=1)
      win_width = 2 * WINDOW_WIDTH
    elif self._view_mode == TOP_DOWN_VIEW_MODE:
      img = top
    else:
      img = bot

    if self.window is not None:
      self.window.set_size(win_width, WINDOW_HEIGHT)

    # self.undistort - for UndistortWrapper
    if self.distortion and not self.undistort and mode != "free_cam":
      img = self.camera_model.distort(img)

    if mode == 'rgb_array':
      return img

    from pyglet import gl, window, image

    if self.window is None:
      config = gl.Config(double_buffer=False)
      self.window = window.Window(
        width=win_width,
        height=WINDOW_HEIGHT,
        resizable=False,
        config=config
      )

    self.window.clear()
    self.window.switch_to()
    self.window.dispatch_events()

    # Bind the default frame buffer
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)

    # Setup orthogonal projection
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()
    gl.glOrtho(0, WINDOW_WIDTH, 0, WINDOW_HEIGHT, 0, 10)

    # Draw the image to the rendering window
    width = img.shape[1]
    height = img.shape[0]
    img = np.ascontiguousarray(np.flip(img, axis=0))
    img_data = image.ImageData(
      width,
      height,
      'RGB',
      img.ctypes.data_as(POINTER(gl.GLubyte)),
      pitch=width * 3,
    )
    img_data.blit(
      0,
      0,
      0,
      width=WINDOW_WIDTH,
      height=WINDOW_HEIGHT
    )

    # Display position/state information
    if text and mode != "free_cam":
      x, y, z = self.cur_pos
      self.text_label.text = "pos: (%.2f, %.2f, %.2f), angle: %d, steps: %d, speed: %.2f m/s" % (
        x, y, z,
        int(self.cur_angle * 180 / math.pi),
        self.step_count,
        self.speed
      )
      self.text_label.draw()

    # Force execution of queued commands
    gl.glFlush()
Example #19
    def grab_frame(self):
        # Grab a frame from the camera and convert IplImage -> PIL -> bytes.
        cv_img = highgui.cvQueryFrame(self.camera)
        pil_img = opencv.adaptors.Ipl2PIL(cv_img).resize(
            (self.texture.width, self.texture.height))
        width, height = pil_img.size
        # Pillow removed tostring(); tobytes() is the supported call.
        return image.ImageData(width, height, pil_img.mode, pil_img.tobytes())
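
As a closing note, PIL modes map directly onto ImageData format strings for the common cases. A hedged sketch of a general PIL-to-pyglet conversion helper (hypothetical; the negative pitch accounts for PIL storing rows top-to-bottom):

    from pyglet import image

    def pil_to_pyglet(pil_img):
        # Assumes mode 'RGB', 'RGBA' or 'L'.
        w, h = pil_img.size
        return image.ImageData(w, h, pil_img.mode, pil_img.tobytes(),
                               pitch=-w * len(pil_img.mode))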