def _YUVtoVFrame(Y, U, V, format=None):
    """Pack separate Y, U and V planes into an av.VideoFrame."""
    assert Y.ndim == 2, "Y.ndim must be 2."
    assert U.ndim == 2, "U.ndim must be 2."
    assert V.ndim == 2, "V.ndim must be 2."
    assert Y.shape[0] % 2 == 0, "Y.shape[0] must be even."
    assert Y.shape[1] % 2 == 0, "Y.shape[1] must be even."
    assert U.shape == V.shape, "V.shape must be equal to U.shape."
    H, W = Y.shape
    h, w = U.shape
    if 2 * h == H and 2 * w == W:
        # Chroma subsampled by 2 in both directions: planar 4:2:0.
        if format is None:
            format = "yuv420p"
        # yuv420p expects a (3 * H // 2, W) array: the full Y plane followed
        # by the U and V planes packed row-wise into the remaining H // 2 rows.
        A = numpy.concatenate(
            (Y.reshape(H * W), U.reshape(h * w), V.reshape(h * w))
        ).reshape(3 * H // 2, W)
        return VideoFrame.from_ndarray(A, format=format)
    elif h == H and 2 * w == W:
        # Chroma subsampled by 2 horizontally only: packed 4:2:2.
        if format is None:
            format = "yuyv422"
        # Interleave the U and V columns so each row reads U0 V0 U1 V1 ...
        UV = numpy.concatenate((U, V), axis=1).reshape(-1, 2, w).swapaxes(1, 2).reshape(-1, W)
        # Stack Y and the interleaved chroma as the last axis: (H, W, 2).
        A = numpy.moveaxis((Y, UV), 0, 2)
        return VideoFrame.from_ndarray(A, format=format)
    else:
        raise ValueError("unsupported chroma plane shape: %r" % (U.shape,))
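# A minimal usage sketch for _YUVtoVFrame, assuming "import numpy" and
# "from av import VideoFrame" at module level; the flat gray planes and the
# _demo_yuv_to_frame name are made up purely for illustration.
def _demo_yuv_to_frame():
    Y = numpy.full((480, 640), 128, dtype=numpy.uint8)  # full-size luma
    U = numpy.full((240, 320), 64, dtype=numpy.uint8)   # quarter-size chroma
    V = numpy.full((240, 320), 192, dtype=numpy.uint8)
    frame = _YUVtoVFrame(Y, U, V)
    print(frame.format.name, frame.width, frame.height)  # yuv420p 640 480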
async def recv(self):
    frame = await self.track.recv()

    if self.transform == "cartoon":
        img = frame.to_ndarray(format="bgr24")

        # prepare color
        img_color = cv2.pyrDown(cv2.pyrDown(img))
        for _ in range(6):
            img_color = cv2.bilateralFilter(img_color, 9, 9, 7)
        img_color = cv2.pyrUp(cv2.pyrUp(img_color))

        # prepare edges (the array is BGR, so convert with COLOR_BGR2GRAY)
        img_edges = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_edges = cv2.adaptiveThreshold(
            cv2.medianBlur(img_edges, 7),
            255,
            cv2.ADAPTIVE_THRESH_MEAN_C,
            cv2.THRESH_BINARY,
            9,
            2,
        )
        img_edges = cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR)

        # combine color and edges
        img = cv2.bitwise_and(img_color, img_edges)

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format="bgr24")
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    elif self.transform == "edges":
        # perform edge detection
        img = frame.to_ndarray(format="bgr24")
        img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format="bgr24")
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    elif self.transform == "rotate":
        # rotate image
        img = frame.to_ndarray(format="bgr24")
        rows, cols, _ = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format="bgr24")
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    else:
        return frame
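# A minimal sketch of the track class such a recv() typically belongs to,
# modeled on aiortc's MediaStreamTrack; the class name and constructor
# arguments here are an assumption, not taken from the snippet above.
from aiortc import MediaStreamTrack

class VideoTransformTrack(MediaStreamTrack):
    kind = "video"  # required so aiortc treats this as a video track

    def __init__(self, track, transform):
        super().__init__()
        self.track = track          # upstream track whose frames we wrap
        self.transform = transform  # "cartoon", "edges", "rotate" or None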
async def recv(self):
    frame = await self.track.recv()
    self.counter += 1

    if self.transform == 'edges':
        # perform edge detection
        img = frame.to_ndarray(format='bgr24')
        img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format='bgr24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    elif self.transform == 'rotate':
        # rotate image
        img = frame.to_ndarray(format='bgr24')
        rows, cols, _ = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format='bgr24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    else:
        return frame
async def recv(self):
    global im
    pts, time_base = await self.next_timestamp()

    # create a video frame from the shared image, falling back to self.img
    # until the first shared frame arrives (test with "is None", not "== None")
    if im is None:
        frame = VideoFrame.from_ndarray(self.img, format="bgr24")
    else:
        frame = VideoFrame.from_ndarray(im, format="bgr24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # read cameras; bail out if either capture fails, since both frames are
    # uploaded to textures below
    ret1, frame1 = self.cap1.read()
    ret2, frame2 = self.cap2.read()
    if not ret1 or not ret2:
        return
    self.tex1.write(data=frame1)
    self.tex2.write(data=frame2)

    # update rotation parameters
    self.prog['yaw'].value = 0.0
    self.prog['pitch'].value = 0.0
    self.prog['roll'].value = 0.0

    # render
    self.ctx.clear(1.0, 1.0, 1.0)
    self.vao.render(mode=6)
    self.fbo.read_into(self.frame, components=3)

    # output
    final_frame = np.array(
        Image.fromarray(self.frame.astype(np.uint8)).resize(self.output_size))
    new_frame = VideoFrame.from_ndarray(final_frame, format="bgr24")
    new_frame.pts = pts
    new_frame.time_base = time_base
    return new_frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    ret, frame = self.cap.read()
    if not ret:
        return

    # upload the camera frame and render a full-screen fan (mode=6 is
    # moderngl.TRIANGLE_FAN)
    self.tex = self.ctx.texture((self.width, self.height), components=3,
                                data=frame.tobytes())
    self.tex.use()
    self.ctx.clear(1.0, 1.0, 1.0)
    self.vao = self.ctx.simple_vertex_array(self.prog,
                                            self.ctx.buffer(self.vertices),
                                            'in_vert')
    self.vao.render(mode=6)

    # read the framebuffer back and wrap it in a VideoFrame
    img_buf = Image.frombytes('RGB', (self.width, self.height),
                              self.fbo.read(components=3))
    image_out = np.array(img_buf.convert('RGB'))
    new_frame = VideoFrame.from_ndarray(image_out, format="bgr24")
    new_frame.pts = pts
    new_frame.time_base = time_base
    return new_frame
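# A rough sketch of the GL state the two rendering recv() methods above rely
# on, assuming a headless moderngl context; the sizes, the helper name and
# the shader sources are placeholders, not taken from the snippets.
import moderngl

def _make_offscreen(width, height, vertex_shader, fragment_shader):
    ctx = moderngl.create_standalone_context()
    prog = ctx.program(vertex_shader=vertex_shader,
                       fragment_shader=fragment_shader)
    fbo = ctx.simple_framebuffer((width, height), components=3)
    fbo.use()  # subsequent renders and fbo.read() target this buffer
    return ctx, prog, fbo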
async def recv(self):
    pts, time_base = await self.next_timestamp()

    frame = VideoFrame.from_ndarray(self.data_bgr, format='bgr24')
    frame.pts = pts
    frame.time_base = time_base
    return frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # note: the capture's success flag is discarded, so img may be None if
    # the device stalls
    _, img = self.cap.read()
    frame = VideoFrame.from_ndarray(img, format="bgr24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
async def recv(self):
    frame = await self.track.recv()
    self.transform = "edges"  # force the watermark branch below

    if self.transform == "edges":
        img = frame.to_ndarray(format="bgr24")

        # overlay a watermark instead of running edge detection
        logo = cv2.imread('watermark.png')
        logo = cv2.resize(logo, (100, 60))  # (width, height)

        # create a mask of the logo
        img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)

        # region of image (ROI) where the logo is inserted: a 60x100 box,
        # inset 10 pixels from the bottom-right corner
        roi = img[-60 - 10:-10, -100 - 10:-10]

        # zero the ROI under the mask, then add the logo
        roi[np.where(mask)] = 0
        roi += logo

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format="bgr24")
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    else:
        return frame
async def recv(self):
    if self.track:
        frame = await self.track.recv()
        img = None
        try:
            # process video frame
            frame_img = frame.to_ndarray(format='bgr24')
            if isinstance(self.__frame_transformer, FrameTransformer):
                img = self.__frame_transformer.transform(frame_img, self.frame_idx)
            else:
                img = self.__frame_transformer(frame_img, self.frame_idx)
        except Exception as ex:
            logger.error(ex)

        # fall back to the raw frame, then to the last good transformed image
        if img is None and self.last_img is None:
            img = frame.to_ndarray(format='bgr24')
        elif img is None:
            img = self.last_img
        else:
            self.last_img = img
        self.frame_idx += 1

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format='bgr24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    else:
        # no source track: emit a black frame (from_ndarray needs uint8, and
        # there is no source frame to copy timing information from)
        img = np.zeros((640, 480, 3), dtype=np.uint8)
        return VideoFrame.from_ndarray(img, format='bgr24')
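# A sketch of the FrameTransformer interface the recv() above dispatches on;
# the exact base class in the source is unknown, so this is an assumed shape:
# a transform(img, frame_idx) method returning a bgr24 ndarray (or None to
# keep the previous frame). GrayscaleTransformer is a made-up example.
class FrameTransformer:
    def transform(self, img, frame_idx):
        raise NotImplementedError

class GrayscaleTransformer(FrameTransformer):
    """Example transformer: desaturate while keeping a 3-channel layout."""
    def transform(self, img, frame_idx):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)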
def __init__(self):
    super().__init__()  # don't forget this!
    self.counter = 0
    height, width = 480, 640

    # generate flag
    data_bgr = np.hstack([
        self._create_rectangle(width=213, height=480, color=(255, 0, 0)),      # blue
        self._create_rectangle(width=214, height=480, color=(255, 255, 255)),  # white
        self._create_rectangle(width=213, height=480, color=(0, 0, 255)),      # red
    ])

    # shrink and center it
    M = np.float32([[0.5, 0, width / 4], [0, 0.5, height / 4]])
    data_bgr = cv2.warpAffine(data_bgr, M, (width, height))

    # compute animation
    omega = 2 * np.pi / height
    id_x = np.tile(np.array(range(width), dtype=np.float32), (height, 1))
    id_y = np.tile(np.array(range(height), dtype=np.float32), (width, 1)).transpose()

    # pre-render one wave cycle as 30 frames
    self.frames = []
    for k in range(30):
        phase = 2 * k * np.pi / 30
        map_x = id_x + 10 * np.cos(omega * id_x + phase)
        map_y = id_y + 10 * np.sin(omega * id_x + phase)
        self.frames.append(
            VideoFrame.from_ndarray(
                cv2.remap(data_bgr, map_x, map_y, cv2.INTER_LINEAR),
                format="bgr24"))
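# The _create_rectangle helper is not shown above; a minimal sketch that
# matches how it is called (a solid BGR block of the given size):
def _create_rectangle(self, width, height, color):
    data_bgr = np.zeros((height, width, 3), np.uint8)
    data_bgr[:, :] = color
    return data_bgr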
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # wrap the shared module-level frame in a VideoFrame
    global frame
    video_frame = VideoFrame.from_ndarray(frame, format="bgr24")
    video_frame.pts = pts
    video_frame.time_base = time_base
    return video_frame
async def recv(self):
    self.data_bgr = await self.camera_device.get_latest_frame()
    frame = VideoFrame.from_ndarray(self.data_bgr, format='bgr24')

    pts, time_base = await self.next_timestamp()
    frame.pts = pts
    frame.time_base = time_base
    return frame
def read_frame(self):
    # This is where we read a frame from the camera. It could be changed to
    # a function that takes a frame as a parameter, puts it in the buffer,
    # and then lets that buffer be used in the async send.
    ret, frame = self.vid.read()
    if ret:
        self.last_frame = VideoFrame.from_ndarray(frame, format="bgr24")
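# A sketch of how read_frame() might pair with an async recv(), assuming the
# track subclasses aiortc's VideoStreamTrack (which provides next_timestamp);
# the polling approach here is illustrative, not taken from the source.
async def recv(self):
    pts, time_base = await self.next_timestamp()
    self.read_frame()  # refresh self.last_frame from the camera
    frame = self.last_frame
    frame.pts = pts
    frame.time_base = time_base
    return frame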
async def recv(self):
    print(self.position)  # debug: current overlay position
    frame = await self.track.recv()
    img = frame.to_ndarray(format="bgr24")

    # overlay the watermark in the bottom-right corner
    logo = cv2.imread('watermark.png')
    logo = cv2.resize(logo, (100, 60))  # (width, height)

    # create a mask of the logo
    img2gray = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 1, 255, cv2.THRESH_BINARY)

    # region of image (ROI) where the logo is inserted: a 60x100 box,
    # inset 10 pixels from the bottom-right corner
    roi = img[-60 - 10:-10, -100 - 10:-10]

    # zero the ROI under the mask, then add the logo
    roi[np.where(mask)] = 0
    roi += logo

    # draw the selection rectangle
    xy1 = [self.powidth, self.poheight]
    xy2 = [self.powidth2, self.poheight2]
    img = cv2.rectangle(img, (xy1[0], xy1[1]), (xy2[0], xy2[1]),
                        (0, 0, 255), 1, cv2.LINE_4)

    new_frame = VideoFrame.from_ndarray(img, format="bgr24")
    new_frame.pts = frame.pts
    new_frame.time_base = frame.time_base
    return new_frame
async def recv(self):
    frame = await self.track.recv()

    # run face detection on the decoded frame
    img = frame.to_ndarray(format="bgr24")
    img = detectFaces(img)

    new_frame = VideoFrame.from_ndarray(img, format="bgr24")
    new_frame.pts = frame.pts
    new_frame.time_base = frame.time_base
    return new_frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    ret, img = vs.read()
    img = cv2.resize(img, (256, 192))
    frame = VideoFrame.from_ndarray(img, format="bgr24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # send a new frame
    img = await self.framereader.getFrame()
    new_frame = VideoFrame.from_ndarray(img, format="bgr24")
    new_frame.pts = pts
    new_frame.time_base = time_base
    return new_frame
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # capture straight into the preallocated numpy buffer
    camera.capture(image, format="rgb", use_video_port=True)
    frame = VideoFrame.from_ndarray(image, format="rgb24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
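# A sketch of the globals the recv() above relies on, assuming the Raspberry
# Pi picamera library; the resolution is a placeholder. picamera pads capture
# buffers to multiples of 32 (width) and 16 (height), which 640x480 already
# satisfies, so the buffer can be reused as-is.
import picamera
import numpy as np

camera = picamera.PiCamera()
camera.resolution = (640, 480)
image = np.empty((480, 640, 3), dtype=np.uint8)  # reused by every capture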
async def recv(self):
    pts, time_base = await self.next_timestamp()

    # keep serving the previous image if the capture fails
    ret, new_img = self.video_capture.read()
    if ret and new_img is not None:
        self.img = new_img
    frame = VideoFrame.from_ndarray(self.img, format="bgr24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
def test_ndarray_yuyv422_align(self):
    array = numpy.random.randint(0, 256, size=(238, 318, 2), dtype=numpy.uint8)
    frame = VideoFrame.from_ndarray(array, format='yuyv422')
    self.assertEqual(frame.width, 318)
    self.assertEqual(frame.height, 238)
    self.assertEqual(frame.format.name, 'yuyv422')
    self.assertTrue((frame.to_ndarray() == array).all())
def test_ndarray_yuv420p(self):
    # a 640x480 yuv420p frame is packed as a (720, 640) array: 480 rows of Y
    # followed by the U and V planes in the remaining rows (720 = 480 * 3 / 2)
    array = numpy.random.randint(0, 256, size=(720, 640), dtype=numpy.uint8)
    frame = VideoFrame.from_ndarray(array, format='yuv420p')
    self.assertEqual(frame.width, 640)
    self.assertEqual(frame.height, 480)
    self.assertEqual(frame.format.name, 'yuv420p')
    self.assertTrue((frame.to_ndarray() == array).all())
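# A sketch of how the (720, 640) yuv420p buffer above decomposes into planes;
# the helper name is made up, and the reshape mirrors the packing used by
# _YUVtoVFrame earlier in this file.
def split_yuv420p(array, width=640, height=480):
    Y = array[:height]                                              # (480, 640)
    U = array[height:height * 5 // 4].reshape(height // 2, width // 2)
    V = array[height * 5 // 4:].reshape(height // 2, width // 2)
    return Y, U, V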
async def recv(self):
    if not self.config.active:
        return None

    pts, time_base = await self.next_timestamp()

    ret, frm = vs.read()
    frm = self.process_frame(self.config, frm)
    frame = VideoFrame.from_ndarray(frm, format="bgr24")
    frame.pts = pts
    frame.time_base = time_base
    return frame
def test_ndarray_yuyv422_align(self):
    array = numpy.random.randint(0, 256, size=(238, 318, 2), dtype=numpy.uint8)
    frame = VideoFrame.from_ndarray(array, format="yuyv422")
    self.assertEqual(frame.width, 318)
    self.assertEqual(frame.height, 238)
    self.assertEqual(frame.format.name, "yuyv422")
    self.assertNdarraysEqual(frame.to_ndarray(), array)
def test_ndarray_bgr8(self):
    # bgr8 packs each pixel into a single byte, so the array is 2-D
    array = numpy.random.randint(0, 256, size=(480, 640), dtype=numpy.uint8)
    frame = VideoFrame.from_ndarray(array, format="bgr8")
    self.assertEqual(frame.width, 640)
    self.assertEqual(frame.height, 480)
    self.assertEqual(frame.format.name, "bgr8")
    self.assertNdarraysEqual(frame.to_ndarray(), array)
async def recv(self):
    frame = await self.track.recv()
    self.counter += 1
    print(timer())

    if self.transform == 'edges':
        # perform edge detection
        img = frame.to_ndarray(format='bgr24')
        img = cv2.cvtColor(cv2.Canny(img, 100, 200), cv2.COLOR_GRAY2BGR)

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format='bgr24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    elif self.transform == 'rotate':
        # rotate image
        img = frame.to_ndarray(format='bgr24')
        rows, cols, _ = img.shape
        M = cv2.getRotationMatrix2D((cols / 2, rows / 2), frame.time * 45, 1)
        img = cv2.warpAffine(img, M, (cols, rows))

        # rebuild a VideoFrame, preserving timing information
        new_frame = VideoFrame.from_ndarray(img, format='bgr24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
    else:
        # style transfer: JPEG-compress the frame, then feed the encoded
        # bytes to the transfer model
        start = timer()
        img = frame.to_ndarray(format='bgr24')
        result, encimg = cv2.imencode('.jpg', img, encode_param)
        generated = transfer(encimg)
        end = timer()
        print('process time:', end - start)

        # rebuild a VideoFrame, preserving timing information;
        # the model output is RGB
        new_frame = VideoFrame.from_ndarray(generated, format='rgb24')
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
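# The encode_param global is not defined in the snippet above; the usual
# OpenCV pattern for a JPEG quality setting looks like this (the value 90
# is a guess):
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]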
def test_ndarray_gbrp16_align(self):
    array = numpy.random.randint(0, 65536, size=(238, 318, 3), dtype=numpy.uint16)
    for format in ["gbrp16be", "gbrp16le"]:
        frame = VideoFrame.from_ndarray(array, format=format)
        self.assertEqual(frame.width, 318)
        self.assertEqual(frame.height, 238)
        self.assertEqual(frame.format.name, format)
        self.assertNdarraysEqual(frame.to_ndarray(), array)
def test_ndarray_rgba_align(self):
    array = numpy.random.randint(0, 256, size=(238, 318, 4), dtype=numpy.uint8)
    for format in ['argb', 'rgba', 'abgr', 'bgra']:
        frame = VideoFrame.from_ndarray(array, format=format)
        self.assertEqual(frame.width, 318)
        self.assertEqual(frame.height, 238)
        self.assertEqual(frame.format.name, format)
        self.assertTrue((frame.to_ndarray() == array).all())
def test_ndarray_rgb(self):
    array = numpy.random.randint(0, 256, size=(480, 640, 3), dtype=numpy.uint8)
    for format in ['rgb24', 'bgr24']:
        frame = VideoFrame.from_ndarray(array, format=format)
        self.assertEqual(frame.width, 640)
        self.assertEqual(frame.height, 480)
        self.assertEqual(frame.format.name, format)
        self.assertTrue((frame.to_ndarray() == array).all())
def test_ndarray_rgba_align(self):
    array = numpy.random.randint(0, 256, size=(238, 318, 4), dtype=numpy.uint8)
    for format in ["argb", "rgba", "abgr", "bgra"]:
        frame = VideoFrame.from_ndarray(array, format=format)
        self.assertEqual(frame.width, 318)
        self.assertEqual(frame.height, 238)
        self.assertEqual(frame.format.name, format)
        self.assertNdarraysEqual(frame.to_ndarray(), array)
def test_ndarray_gbrp14(self):
    # 14-bit samples stored in uint16, so values range over [0, 2**14)
    array = numpy.random.randint(0, 16384, size=(480, 640, 3), dtype=numpy.uint16)
    for format in ["gbrp14be", "gbrp14le"]:
        frame = VideoFrame.from_ndarray(array, format=format)
        self.assertEqual(frame.width, 640)
        self.assertEqual(frame.height, 480)
        self.assertEqual(frame.format.name, format)
        self.assertNdarraysEqual(frame.to_ndarray(), array)
async def recv(self):
    if self._startTime is None and self._startedCallback is not None:
        self._startedCallback()
    try:
        img = await self._frameSubscription.get()
    except SubscriptionClosed:
        self._log.debug(
            "Video track finished. raising MediaStreamError to shut down connection"
        )
        self.stop()
        raise MediaStreamError
    except Exception:
        self._log.exception("Got unknown error. Crashing video stream")
        self.stop()
        raise MediaStreamError

    if self._startTime is None:
        self._startTime = time.time()

    new_frame = VideoFrame.from_ndarray(img, format="bgr24")
    new_frame.time_base = VIDEO_TIME_BASE

    # https://en.wikipedia.org/wiki/Presentation_timestamp
    if self._fps is None:
        # We assume that the frames arrive as fast as they are created.
        new_frame.pts = int((time.time() - self._startTime) * VIDEO_CLOCK_RATE)
    else:
        # We have a target frame rate. Here, we do something similar to the
        # audio subscription.
        self._frameNumber += 1
        perfectFrameNumber = int((time.time() - self._startTime) * self._fps)

        if self._canSkip:
            if perfectFrameNumber - self._fps * 1 > self._frameNumber:
                self._log.warning(
                    "Received video frame is over 1 second behind optimal timestamp! "
                    "Skipping frame forward! Use canSkip=False to disable this correction"
                )
                self._frameNumber = perfectFrameNumber
        new_frame.pts = int(self._frameNumber * VIDEO_CLOCK_RATE / self._fps)

        if perfectFrameNumber + self._fps * 2 < self._frameNumber:
            # If the stream is over 2 seconds ahead, wait 1 second before
            # continuing
            self._log.debug("Stream is over 2 seconds ahead. Sleeping for 1 second.")
            await asyncio.sleep(1)

    self._log.debug("Writing frame %s", new_frame)
    return new_frame
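# The VIDEO_CLOCK_RATE / VIDEO_TIME_BASE globals used above are not shown; a
# plausible definition, assuming the standard 90 kHz RTP video clock:
from fractions import Fraction

VIDEO_CLOCK_RATE = 90000
VIDEO_TIME_BASE = Fraction(1, VIDEO_CLOCK_RATE)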