def test_reformat_pts(self):
    """Reformatting a frame must carry its pts and time_base over unchanged."""
    source = VideoFrame(640, 480, 'rgb24')
    source.pts = 123
    source.time_base = '456/1'  # Just to be different.
    scaled = source.reformat(320, 240)
    self.assertEqual(scaled.pts, 123)
    self.assertEqual(scaled.time_base, 456)
def create_video_frame(self, width, height, pts, format='yuv420p', time_base=VIDEO_TIME_BASE):
    """
    Create a single blank video frame.

    Every plane is zero-filled so the frame holds defined pixel data.
    """
    frame = VideoFrame(width=width, height=height, format=format)
    for plane in frame.planes:
        plane.update(b"\x00" * plane.buffer_size)
    frame.pts = pts
    frame.time_base = time_base
    return frame
async def recv(self) -> Frame:
    """
    Receive the next :class:`~av.video.frame.VideoFrame`.

    The base implementation just reads a 640x480 green frame at 30fps,
    subclass :class:`VideoStreamTrack` to provide a useful implementation.
    """
    pts, time_base = await self.next_timestamp()

    # Build a blank frame and stamp it with the computed timing.
    frame = VideoFrame(width=640, height=480)
    for plane in frame.planes:
        plane.update(b"\x00" * plane.buffer_size)
    frame.pts = pts
    frame.time_base = time_base
    return frame
def decode(self, encoded_frame: JitterFrame) -> List[VideoFrameExt]:
    """
    Decode one reassembled encoded frame into zero or more video frames.

    Returns an empty list when the codec reports an error or produces
    no output for this input.
    """
    frames: List[VideoFrameExt] = []
    result = lib.vpx_codec_decode(
        self.codec,
        encoded_frame.data,
        len(encoded_frame.data),
        ffi.NULL,
        lib.VPX_DL_REALTIME,
    )
    if result == lib.VPX_CODEC_OK:
        # Iterate over every decoded image the codec has buffered.
        it = ffi.new("vpx_codec_iter_t *")
        while True:
            img = lib.vpx_codec_get_frame(self.codec, it)
            if not img:
                break
            # Only planar I420 output is handled below.
            assert img.fmt == lib.VPX_IMG_FMT_I420
            frame = VideoFrame(width=img.d_w, height=img.d_h)
            frame.pts = encoded_frame.timestamp
            frame.time_base = VIDEO_TIME_BASE
            # Recover the wall-clock capture time from the NTP timestamp
            # plus the RTP timestamp delta converted to seconds.
            frame_ext = VideoFrameExt(
                frame=frame,
                ntp_timestamp=encoded_frame.ntp_timestamp + timedelta(
                    seconds=encoded_frame.rtp_diff / VIDEO_CLOCK_RATE),
                encoded_frame=encoded_frame,
            )
            # Copy the Y, U and V planes row by row, since the source
            # stride may include padding the destination does not have.
            for p in range(3):
                i_stride = img.stride[p]
                i_buf = ffi.buffer(img.planes[p], i_stride * img.d_h)
                i_pos = 0
                o_stride = frame.planes[p].line_size
                o_buf = memoryview(cast(bytes, frame.planes[p]))
                o_pos = 0
                # Chroma planes (p > 0) have half the luma height in I420.
                div = p and 2 or 1
                for r in range(0, img.d_h // div):
                    # Copy only the visible row width (o_stride bytes).
                    o_buf[o_pos:o_pos + o_stride] = i_buf[i_pos:i_pos + o_stride]
                    i_pos += i_stride
                    o_pos += o_stride
            frames.append(frame_ext)
    return frames
def test_encoding_with_pts(self):
    """Frames carrying explicit pts must encode into the stream's 1/24 time base."""
    path = self.sandboxed("video_with_pts.mov")
    with av.open(path, "w") as output:
        stream = output.add_stream("libx264", 24)
        stream.width = WIDTH
        stream.height = HEIGHT
        stream.pix_fmt = "yuv420p"

        for index in range(DURATION):
            frame = VideoFrame(WIDTH, HEIGHT, "rgb24")
            frame.pts = index * 2000
            frame.time_base = Fraction(1, 48000)
            for pkt in stream.encode(frame):
                self.assertEqual(pkt.time_base, Fraction(1, 24))
                output.mux(pkt)

        # Drain any packets still buffered inside the encoder.
        for pkt in stream.encode(None):
            self.assertEqual(pkt.time_base, Fraction(1, 24))
            output.mux(pkt)
def decode(self, encoded_frame):
    """Decode an encoded frame into a list of I420 video frames."""
    decoded = []
    status = lib.vpx_codec_decode(
        self.codec,
        encoded_frame.data,
        len(encoded_frame.data),
        ffi.NULL,
        lib.VPX_DL_REALTIME,
    )
    if status != lib.VPX_CODEC_OK:
        return decoded

    iterator = ffi.new("vpx_codec_iter_t *")
    while True:
        img = lib.vpx_codec_get_frame(self.codec, iterator)
        if not img:
            break
        assert img.fmt == lib.VPX_IMG_FMT_I420

        frame = VideoFrame(width=img.d_w, height=img.d_h)
        frame.pts = encoded_frame.timestamp
        frame.time_base = VIDEO_TIME_BASE

        # Copy Y/U/V row by row: the source stride may carry padding that
        # the destination plane does not.
        for plane_index in range(3):
            src_stride = img.stride[plane_index]
            src = ffi.buffer(img.planes[plane_index], src_stride * img.d_h)
            dst_stride = frame.planes[plane_index].line_size
            dst = memoryview(frame.planes[plane_index])
            # Chroma planes are half-height in I420.
            rows = img.d_h // (2 if plane_index else 1)
            src_pos = 0
            dst_pos = 0
            for _ in range(rows):
                dst[dst_pos:dst_pos + dst_stride] = src[src_pos:src_pos + dst_stride]
                src_pos += src_stride
                dst_pos += dst_stride

        decoded.append(frame)
    return decoded
def test_encoding_with_pts(self):
    """
    Frames carrying explicit pts must encode into the stream's 1/24 time base.

    Fix: the original opened the container manually and only closed it at the
    end of the method, so a failed assertion or encode error leaked the file
    handle. A context manager guarantees the container is closed either way
    (matching the sibling test that already uses ``with av.open(...)``).
    """
    path = self.sandboxed('video_with_pts.mov')
    with av.open(path, 'w') as output:
        stream = output.add_stream('libx264', 24)
        stream.width = WIDTH
        stream.height = HEIGHT
        stream.pix_fmt = "yuv420p"
        for i in range(DURATION):
            frame = VideoFrame(WIDTH, HEIGHT, 'rgb24')
            frame.pts = i * 2000
            frame.time_base = Fraction(1, 48000)
            for packet in stream.encode(frame):
                self.assertEqual(packet.time_base, Fraction(1, 24))
                output.mux(packet)
        # Flush packets still buffered inside the encoder.
        for packet in stream.encode(None):
            self.assertEqual(packet.time_base, Fraction(1, 24))
            output.mux(packet)