def __init__(self):
    width = 640
    height = 480
    self.counter = 0
    self.frame_green = VideoFrame(width=width, height=height)
    self.frame_remote = VideoFrame(width=width, height=height)
    self.last = None
def test_encoder(self):
    encoder = get_encoder(H264_CODEC)
    self.assertTrue(isinstance(encoder, H264Encoder))

    frame = VideoFrame(width=640, height=480)
    frame.pts = 0
    frame.time_base = VIDEO_TIME_BASE
    packages, timestamp = encoder.encode(frame)
    self.assertGreaterEqual(len(packages), 1)
def test_encoder(self):
    encoder = get_encoder(VP8_CODEC)
    self.assertTrue(isinstance(encoder, Vp8Encoder))

    frame = VideoFrame(width=640, height=480)
    payloads = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)

    # change resolution
    frame = VideoFrame(width=320, height=240)
    payloads = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)
def test_encoder(self):
    encoder = get_encoder(H264_CODEC)
    self.assertTrue(isinstance(encoder, H264Encoder))

    frame = VideoFrame(width=640, height=480)
    packages = encoder.encode(frame)
    self.assertGreaterEqual(len(packages), 1)
def __init__(self, process=True, pc=None):
    width = WIDTH
    height = HEIGHT
    self.counter = 0
    self.frame_green = VideoFrame(width=width, height=height)
    self.frame_remote = None  # VideoFrame(width=width, height=height)
    self.last = None
    self.bgr_remote = None
    self.lensometer = Lensometer(algorithm=0, circle_d=98)  # for the old circles use 146
    self.dset = Dataset()
    self.dist = Distort()
    self.dset_counter = 0
    self.MAX_CALIBR = 15
    self.MAX_CALC = 20
    self.CALIBR = 'calibr'
    self.CALC = 'calc'
    self.DSET_STOP = 'dset-stop'
    self.position_quality = False
    self.is_lens_appeared = False
    self.SPH = None
    self.width = width
    self.height = height
    self.f_width = width
    self.f_height = height
    self.datachannel = None
    self.transport = 0
    self.process = process
    self.pc = pc
    self.track = None
    self.is_video = False
    if pc is not None:
        print("Start session:", pc.uuid)
def test_encoder_large(self):
    encoder = get_encoder(VP8_CODEC)
    self.assertTrue(isinstance(encoder, VpxEncoder))

    frame = VideoFrame(width=2560, height=1920)
    payloads = encoder.encode(frame)
    self.assertEqual(len(payloads), 7)
    self.assertEqual(len(payloads[0]), 1300)
async def recv(self):
    await asyncio.sleep(1)
    img_yuv = await self.recv_yuv()
    img_yuv_bytes = img_yuv.tobytes()
    # debug output
    print(img_yuv.shape)
    print(len(img_yuv_bytes))
    print(img_yuv_bytes)
    return VideoFrame(width=self.width, height=self.height, data=img_yuv_bytes)
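Because the VideoFrame constructor used here takes a raw buffer, a quick size check can catch layout mistakes early. A minimal sketch, assuming the same YV12 layout (1.5 bytes per pixel) used by the other helpers in this collection; the `expected` variable is illustrative and not part of the original snippet:

    # Y plane (width x height) plus two quarter-size chroma planes
    expected = self.width * self.height * 3 // 2
    assert len(img_yuv_bytes) == expected, (len(img_yuv_bytes), expected)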
def test_encoder_large(self):
    encoder = get_encoder(VP8_CODEC)
    self.assertTrue(isinstance(encoder, Vp8Encoder))

    # first keyframe
    frame = VideoFrame(width=2560, height=1920, timestamp=0)
    payloads = encoder.encode(frame)
    self.assertEqual(len(payloads), 7)
    self.assertEqual(len(payloads[0]), 1300)

    # delta frame
    frame = VideoFrame(width=2560, height=1920, timestamp=3000)
    payloads = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)

    # force keyframe
    frame = VideoFrame(width=2560, height=1920, timestamp=6000)
    payloads = encoder.encode(frame, force_keyframe=True)
    self.assertEqual(len(payloads), 7)
    self.assertEqual(len(payloads[0]), 1300)
def test_frame_encoder(self):
    encoder = get_encoder(H264_CODEC)

    frame = VideoFrame(width=640, height=480)
    packages = list(encoder._encode_frame(frame, False))
    self.assertGreaterEqual(len(packages), 3)
    # the first frame must contain at least SPS, PPS and an IDR NAL unit
    # (p[0] & 0x1f extracts the NAL unit type)
    self.assertTrue(set(p[0] & 0x1f for p in packages).issuperset({
        8,  # PPS (picture parameter set)
        7,  # SPS (sequence parameter set)
        5,  # IDR (aka key frame)
    }))

    frame = VideoFrame(width=640, height=480)
    packages = list(encoder._encode_frame(frame, False))
    self.assertGreaterEqual(len(packages), 1)

    # change resolution
    frame = VideoFrame(width=320, height=240)
    packages = list(encoder._encode_frame(frame, False))
    self.assertGreaterEqual(len(packages), 1)
def frame_from_bgr(data_bgr, f_width, f_height):
    # cv2.putText(data_bgr, "HELLO CLIENT! I CAN SEE YOU.", (500, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.89, (80, 100, 30), 2)
    # cv2.putText(data_bgr, " LENSOMETER v0.0.1", (500, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.89, (80, 180, 30), 2)

    # work around a VP8 resize problem - keep the initial frame size
    if data_bgr.shape[0] != f_height or data_bgr.shape[1] != f_width:
        data_bgr = cv2.resize(data_bgr, (f_width, f_height))
    data_yuv = cv2.cvtColor(data_bgr, cv2.COLOR_BGR2YUV_YV12)
    return VideoFrame(width=data_bgr.shape[1], height=data_bgr.shape[0], data=data_yuv.tobytes())
def test_encoder(self):
    encoder = get_encoder(VP8_CODEC)
    self.assertTrue(isinstance(encoder, Vp8Encoder))

    frame = VideoFrame(width=640, height=480)
    frame.pts = 0
    frame.time_base = VIDEO_TIME_BASE
    payloads, timestamp = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)
    self.assertEqual(timestamp, 0)

    # change resolution
    frame = VideoFrame(width=320, height=240)
    frame.pts = 3000
    frame.time_base = VIDEO_TIME_BASE
    payloads, timestamp = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)
    self.assertEqual(timestamp, 3000)
async def recv(self):
    frame = await self.received.get()
    self.counter += 1

    if (self.counter % 100) > 50:
        # apply image processing to frame
        if self.transform == 'edges':
            img = frame_to_bgr(frame)
            edges = cv2.Canny(img, 100, 200)
            return frame_from_gray(edges)
        elif self.transform == 'rotate':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), self.counter * 7.2, 1)
            rotated = cv2.warpAffine(img, M, (cols, rows))
            return frame_from_bgr(rotated)
        elif self.transform == 'green':
            return VideoFrame(width=frame.width, height=frame.height)
        else:
            return frame
    else:
        # return raw frame
        return frame
def roundtrip(self, width, height):
    """
    Round-trip a VideoFrame through encoder then decoder.
    """
    encoder = get_encoder(H264_CODEC)
    decoder = get_decoder(H264_CODEC)

    # encode
    frame = VideoFrame(width=width, height=height)
    packages = encoder.encode(frame)

    # depacketize
    data = b''
    for package in packages:
        packet = RtpPacket(payload=package)
        decoder.parse(packet)
        data += packet._data

    # decode
    frames = decoder.decode(data)
    self.assertEqual(len(frames), 1)
    self.assertEqual(frames[0].width, width)
    self.assertEqual(frames[0].height, height)
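Individual resolutions can then be covered by thin test methods that delegate to this helper. A minimal sketch; the method names below are illustrative and not taken from the original test suite:

def test_roundtrip_640x480(self):
    self.roundtrip(640, 480)

def test_roundtrip_320x240(self):
    self.roundtrip(320, 240)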
async def recv(self):
    frame = await self.received.get()
    self.counter += 1

    if (self.counter % 100) > 50:
        # apply image processing to frame
        if self.transform == 'edges':
            img = frame_to_bgr(frame)
            edges = cv2.Canny(img, 100, 200)
            return frame_from_gray(edges)
        elif self.transform == 'rotate':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), self.counter * 7.2, 1)
            rotated = cv2.warpAffine(img, M, (cols, rows))
            return frame_from_bgr(rotated)
        elif self.transform == 'green':
            return VideoFrame(width=frame.width, height=frame.height)
        elif self.transform == 'blue':
            # NG: return VideoFrame(width=320, height=240, data=b'\xA7' * 76800 + b'\xA7' * 19200 + b'\x50' * 19200)
            # --- OK ---
            # build a solid-colour YV12 buffer: a full-size Y plane plus two quarter-size chroma planes
            ysize = math.ceil(frame.width * frame.height)
            usize = math.ceil(frame.width * frame.height / 4)
            vsize = math.ceil(frame.width * frame.height / 4)
            yuvdata = b'\xA7' * ysize + b'\xA7' * usize + b'\x50' * vsize
            return VideoFrame(width=frame.width, height=frame.height, data=yuvdata)
        elif self.transform == 'rect':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            drawRect = cv2.rectangle(img, (int(rows / 4), int(cols / 4)), (int(rows / 2), int(cols / 2)), (255, 0, 0), 3, 4)
            drawText = cv2.putText(drawRect, 'Camera Test', (int(rows / 4), int(cols / 4)), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2, 4)
            newFrame = frame_from_bgr(drawText)
            return newFrame
        elif self.transform == 'yolov3':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            # run darknet detection and draw the results on the image (works, but slow)
            dn_image = array_to_image(img)
            im_detected = detect_draw(dn_image, thresh=.5, hier_thresh=.5, nms=.45)
            arr = image_to_array(im_detected)
            newFrame = frame_from_bgr(arr)
            return newFrame
        elif self.transform == 'yolov3_rectcv':
            img = frame_to_bgr(frame)
            rows, cols, _ = img.shape
            dn_image = array_to_image(img)
            results = detect_area(dn_image, thresh=.5, hier_thresh=.5, nms=.45)
            for result in results:
                name = str(result[0])  # result[0] is a bytes label such as b'abc'
                area = result[2]
                left = int(area[0] - area[2] / 2)
                top = int(area[1] - area[3] / 2)
                right = int(area[0] + area[2] / 2)
                bottom = int(area[1] + area[3] / 2)
                img = cv2.rectangle(img, (left, top), (right, bottom), (255, 128, 0), 3, 4)
                img = cv2.putText(img, name, (left, top), cv2.FONT_HERSHEY_PLAIN, 2, (255, 128, 0), 2, 4)
            newFrame = frame_from_bgr(img)
            return newFrame
        else:
            return frame
    else:
        # return raw frame
        return frame
def frame_from_gray(data_gray):
    data_bgr = cv2.cvtColor(data_gray, cv2.COLOR_GRAY2BGR)
    data_yuv = cv2.cvtColor(data_bgr, cv2.COLOR_BGR2YUV_YV12)
    return VideoFrame(width=data_bgr.shape[1], height=data_bgr.shape[0], data=data_yuv.tobytes())
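The recv() snippets above also call a frame_to_bgr() counterpart that is not shown here. A minimal sketch of what such a helper could look like, assuming the same YV12 buffer layout used by frame_from_bgr()/frame_from_gray() and the data/width/height attributes these VideoFrame objects expose; this is an illustration, not the original implementation:

import math

import cv2
import numpy


def frame_to_bgr(frame):
    # YV12 packs 1.5 bytes per pixel: a width x height Y plane followed by
    # two quarter-size chroma planes, so the buffer reshapes to
    # (height * 3 / 2) rows of `width` bytes.
    data_flat = numpy.frombuffer(frame.data, numpy.uint8)
    data_yuv = data_flat.reshape((math.ceil(frame.height * 3 / 2), frame.width))
    return cv2.cvtColor(data_yuv, cv2.COLOR_YUV2BGR_YV12)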
def __init__(self, container, video_stream, width, height):
    self.container = container
    self.video_stream = video_stream
    self.frame_skip = 300
    self.total_frame = 0
    self.frame = VideoFrame(width=width, height=height)
def test_encoder_large(self):
    encoder = get_encoder(VP8_CODEC)
    self.assertTrue(isinstance(encoder, Vp8Encoder))

    # first keyframe
    frame = VideoFrame(width=2560, height=1920)
    frame.pts = 0
    frame.time_base = VIDEO_TIME_BASE
    payloads, timestamp = encoder.encode(frame)
    self.assertEqual(len(payloads), 7)
    self.assertEqual(len(payloads[0]), 1300)
    self.assertEqual(timestamp, 0)

    # delta frame
    frame = VideoFrame(width=2560, height=1920)
    frame.pts = 3000
    frame.time_base = VIDEO_TIME_BASE
    payloads, timestamp = encoder.encode(frame)
    self.assertEqual(len(payloads), 1)
    self.assertTrue(len(payloads[0]) < 1300)
    self.assertEqual(timestamp, 3000)

    # force keyframe
    frame = VideoFrame(width=2560, height=1920)
    frame.pts = 6000
    frame.time_base = VIDEO_TIME_BASE
    payloads, timestamp = encoder.encode(frame, force_keyframe=True)
    self.assertEqual(len(payloads), 7)
    self.assertEqual(len(payloads[0]), 1300)
    self.assertEqual(timestamp, 6000)