def test_video_frame_from_bgr(self):
    """A solid-black 640x480 BGR image converts to a frame with the expected size and pts."""
    black = numpy.full((480, 640, 3), (0, 0, 0), numpy.uint8)
    frame = video_frame_from_bgr(black, timestamp=123)
    # 460800 = 640 * 480 * 1.5 — consistent with a YUV420 layout (TODO confirm).
    self.assertEqual(len(frame.data), 460800)
    self.assertEqual(frame.width, 640)
    self.assertEqual(frame.height, 480)
    self.assertEqual(frame.pts, 123)
async def recv(self):
    """Receive the next frame from the wrapped track and apply the configured transform.

    Supported transforms are 'edges' (Canny edge detection) and 'rotate'
    (rotation whose angle grows with the frame counter); any other value
    passes the frame through untouched.
    """
    frame = await self.track.recv()
    self.counter += 1

    if self.transform == 'edges':
        # Canny yields a single-channel image; convert back to BGR before re-wrapping.
        bgr = video_frame_to_bgr(frame)
        outlined = cv2.cvtColor(cv2.Canny(bgr, 100, 200), cv2.COLOR_GRAY2BGR)
        return video_frame_from_bgr(outlined, timestamp=frame.pts)

    if self.transform == 'rotate':
        # Rotate about the image center; 1.8 degrees of extra rotation per frame.
        bgr = video_frame_to_bgr(frame)
        height, width, _ = bgr.shape
        matrix = cv2.getRotationMatrix2D((width / 2, height / 2), self.counter * 1.8, 1)
        turned = cv2.warpAffine(bgr, matrix, (width, height))
        return video_frame_from_bgr(turned, timestamp=frame.pts)

    # No (or unknown) transform: forward the frame unchanged.
    return frame
async def recv(self):
    """Emit the stored image rotated by an angle that grows half a degree per frame."""
    timestamp = await self.next_timestamp()

    height, width, _ = self.img.shape
    # Rotation is centered on the image midpoint, scale factor 1.
    matrix = cv2.getRotationMatrix2D((width / 2, height / 2), self.counter / 2, 1)
    turned = cv2.warpAffine(self.img, matrix, (width, height))

    self.counter += 1
    return video_frame_from_bgr(turned, timestamp=timestamp)
async def recv(self):
    """Serve the static BGR image as a video frame stamped with the next timestamp."""
    pts = await self.next_timestamp()
    return video_frame_from_bgr(self.data_bgr, timestamp=pts)