async def recv(self):
    # rotate image
    rows, cols, _ = self.img.shape
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), self.counter / 2, 1)
    rotated = cv2.warpAffine(self.img, M, (cols, rows))
    frame = frame_from_bgr(rotated)
    self.counter += 1

    # sleep
    if self.last:
        delta = self.last + VIDEO_PTIME - time.time()
        if delta > 0:
            await asyncio.sleep(delta)
    self.last = time.time()

    return frame
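# The recv() above assumes per-instance state set up elsewhere: self.img (a BGR
# source image), self.counter, self.last, and a module-level VIDEO_PTIME frame
# period. A minimal sketch of that setup; the class name, the 1/30 s period and
# the 640x480 placeholder image are assumptions, not the original code.
import asyncio
import time

import cv2
import numpy

VIDEO_PTIME = 1 / 30  # assumed frame period (~30 fps)


class RotatingImageTrack:  # hypothetical name; would subclass the library's video track class
    def __init__(self, img=None):
        self.img = img if img is not None else numpy.zeros((480, 640, 3), numpy.uint8)
        self.counter = 0   # rotation angle counter, incremented once per frame
        self.last = None   # wall-clock time of the previous frame, used for pacing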
async def recv(self):
    frame = await self.received.get()

    self.counter += 1
    if (self.counter % 100) > 50:
        # apply image processing to frame
        if self.transform == 'edges':
            img = frame_to_bgr(frame)
            edges = cv2.Canny(img, 100, 200)
            return frame_from_gray(edges)
        elif self.transform == 'rotate':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), self.counter * 7.2, 1)
            rotated = cv2.warpAffine(img, M, (cols, rows))
            return frame_from_bgr(rotated)
        elif self.transform == 'green':
            return VideoFrame(width=frame.width, height=frame.height)
        else:
            return frame
    else:
        # return raw frame
        return frame
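# These snippets rely on frame_from_bgr / frame_to_bgr / frame_from_gray helpers
# that convert between numpy images and the library's VideoFrame, which here
# appears to carry raw YUV 4:2:0 bytes. A rough sketch of what such helpers
# could look like using OpenCV colour conversions; the
# VideoFrame(width=..., height=..., data=...) signature is an assumption based
# on the attributes used elsewhere in these examples.
import cv2
import numpy


def frame_from_bgr(data_bgr):
    # pack a BGR image into an I420 (YUV 4:2:0) buffer
    data_yuv = cv2.cvtColor(data_bgr, cv2.COLOR_BGR2YUV_I420)
    return VideoFrame(width=data_bgr.shape[1], height=data_bgr.shape[0],
                      data=data_yuv.tobytes())


def frame_from_gray(data_gray):
    # promote a single-channel image to BGR, then reuse frame_from_bgr
    return frame_from_bgr(cv2.cvtColor(data_gray, cv2.COLOR_GRAY2BGR))


def frame_to_bgr(frame):
    # unpack an I420 buffer back into a BGR image
    data_flat = numpy.frombuffer(frame.data, numpy.uint8)
    data_yuv = data_flat.reshape((frame.height * 3 // 2, frame.width))
    return cv2.cvtColor(data_yuv, cv2.COLOR_YUV2BGR_I420)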
async def recv(self):
    # receive one frame from every source track in parallel
    coros = [track.recv() for track in self.tracks]
    frames = await asyncio.gather(*coros)

    # stitch the frames side by side into a single output frame
    data_bgrs = [frame_to_bgr(frame) for frame in frames]
    data_bgr = numpy.hstack(data_bgrs)
    return frame_from_bgr(data_bgr)
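# numpy.hstack only works if every source frame has the same height (and channel
# count), so this combining track implicitly assumes all tracks share one
# resolution. If that cannot be guaranteed, the frames could be resized first;
# a possible guard (names here are illustrative, not part of the original code):
target_h = data_bgrs[0].shape[0]
data_bgrs = [
    img if img.shape[0] == target_h
    else cv2.resize(img, (img.shape[1] * target_h // img.shape[0], target_h))
    for img in data_bgrs
]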
def __init__(self, width, height, color):
    data_bgr = numpy.zeros((height, width, 3), numpy.uint8)
    data_bgr[:, :] = color
    self.frame = frame_from_bgr(data_bgr)
def test_frame_from_bgr(self):
    image = numpy.full((480, 640, 3), (0, 0, 0), numpy.uint8)
    frame = frame_from_bgr(image)
    self.assertEqual(len(frame.data), 460800)
    self.assertEqual(frame.width, 640)
    self.assertEqual(frame.height, 480)
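# The expected buffer size follows from the YUV 4:2:0 layout: one full-resolution
# luma plane plus two quarter-resolution chroma planes, i.e.
# 640 * 480 + 2 * (320 * 240) = 307200 + 153600 = 460800 bytes,
# which is width * height * 3 / 2.
assert 640 * 480 * 3 // 2 == 460800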
async def recv(self):
    # grab the next frame from the capture device
    ret, frame = self.cap.read()
    if not ret:
        raise RuntimeError('failed to read frame from capture device')
    return frame_from_bgr(frame)