def main():
    # Register exit handler
    signal.signal(signal.SIGINT, signal_handler)
    # print('Press Ctrl+C to exit.')

    # Load the cameras configuration
    with open(Config.CAMS_YML, 'r') as f:
        data = yaml.safe_load(f)
    cams = data['cams']  # type: dict
    print("Loaded {}".format(Config.CAMS_YML))

    # Connect to the redis instance
    rdb = redis.StrictRedis(host=Config.REDIS_HOST, port=Config.REDIS_PORT,
                            db=Config.REDIS_DB, decode_responses=True)

    # TODO: Consider whether we should read in some other way.
    # Clear keys so that the stats are right.
    for key in rdb.scan_iter("{}:*".format(Config.REDIS_PREFIX)):
        # print("Deleting: {}".format(key))
        rdb.delete(key)

    # Create every cam feeder
    for cam_name, cam in cams.items():
        print('Adding cam {0} to the dict'.format(cam_name))
        cam_url = cam.get('cam_url')
        fps = cam.get('fps')
        queue = Queue()
        p = multiprocessing.Process(target=producer, args=(rdb, queue, cam_name, cam_url, fps))
        p.start()

        if os.path.isdir(cam_url):
            # Directory source: decode and delete files as they appear.
            while True:
                lst_file = os.listdir(cam_url)
                for file in lst_file:
                    out_of_memory()
                    file_url = os.path.join(cam_url, file)
                    queue.put((-1, file))
                    cf = image2pipe.images_from_url(q=queue, video_url=file_url, fps=fps, scale=(1920, 1080))
                    cf.start()
                    while cf.is_alive():
                        gevent.sleep(1)
                    os.remove(file_url)
                print('Wait for new file')
                gevent.sleep(5)
        elif os.path.isfile(cam_url):
            queue.put((-1, cam_url))
            cf = image2pipe.images_from_url(q=queue, video_url=cam_url, fps=fps, scale=(1920, 1080))
            cf.start()

    watchdog(rdb)
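# The signal_handler registered above is defined elsewhere. A minimal sketch
# of a hypothetical version that just exits cleanly on Ctrl+C (assumed, not
# the original implementation):
import sys

def signal_handler(signum, frame):
    print("Caught signal {}, shutting down.".format(signum))
    sys.exit(0)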
def test():
    Path("tmp").mkdir(exist_ok=True)
    q = Queue(maxsize=4)

    # equivalent to
    # ffmpeg -v error -ss 00:00:00 -i data/COVID-19.mp4 -an -sn -f image2pipe \
    #   -vcodec rawvideo -pix_fmt rgb24 -vf fps=2,scale=640x360
    decoder = image2pipe.images_from_url(q, "data/COVID-19.mp4", fps="2", scale=(640, 360), pix_fmt='rgb24')
    # if this is the press conference we might be scaling (1280, 720) -> (640, 360) here
    decoder.start()

    face_confidence = 0
    for pair in yield_from_queue(q):
        i, img = pair
        timer_start = timer()
        image = Image.fromarray(img)
        is_face, confidence = face_in_image(image)
        timer_total = timer() - timer_start
        print("Checking for faces took {0:.3f}s.".format(timer_total))

        if is_face:
            image.save("tmp/face_test_{}.jpg".format(i))
            face_confidence += 1
            if face_confidence > 10:
                break
        else:
            face_confidence = 0

        print("{} {}:{}".format(i, is_face, confidence))
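# The face_in_image helper used above is not shown here. One possible
# implementation, sketched with an OpenCV Haar-cascade detector (an assumed
# illustration, not the original code):
import cv2
import numpy as np

def face_in_image(image):
    """Return (is_face, confidence) for a PIL image."""
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faces, _, weights = cascade.detectMultiScale3(gray, scaleFactor=1.1, minNeighbors=5,
                                                  outputRejectLevels=True)
    if len(faces) == 0:
        return False, 0.0
    return True, float(np.max(weights))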
def test_vf(self):
    q = Queue()
    # decoder = image2pipe.images_from_url(q, VIDEO_URL, fps="30", scale=SCALE, vf=["cropdetect=24:16:0"])
    decoder = image2pipe.images_from_url(q, VIDEO_URL, fps="30", scale=SCALE, vf=["crop=224:224:0:36"])
    decoder.start()

    fn, img = q.get()
    cv2.imshow("frame %d" % 0, img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_rgb24_from_url(self):
    q = Queue()
    decoder = image2pipe.images_from_url(q, VIDEO_URL, fps="30", scale=SCALE)
    decoder.start()

    for i in range(30):
        fn, img = q.get()
        cv2.imshow("frame %d" % i, img)
        cv2.waitKey()
    cv2.destroyAllWindows()
def test_min_params(self):
    q = Queue()
    decoder = image2pipe.images_from_url(q, VIDEO_URL)
    decoder.start()

    for i in range(30):
        fn, img = q.get()
        cv2.imshow("frame %d" % i, img)
        cv2.waitKey()
    cv2.destroyAllWindows()
def test_stitch(self):
    fps = "30"
    out_url = "out.ts"
    scale = (1000, 552)

    bgr_q = Queue()
    decoder = image2pipe.images_from_url(bgr_q, VIDEO_URL, fps=fps, scale=scale)
    decoder.start()

    FORMAT_MPEGTS = "mpegts"
    rtmpt = image2pipe.StitchVideoProcess(bgr_q, out_url, fps, scale, FORMAT_MPEGTS)
    rtmpt.start()
    rtmpt.join()
def __init__(self, filename: str, frame_range: Optional[Tuple[int, int]] = None,
             process_nth_frames: Optional[int] = None, min_size: Optional[int] = None,
             video_info: Optional[VideoInfo] = None):
    if video_info is not None:
        frame_count, width, height, fps = video_info
        self.frame_count = frame_count
        self.width = width
        self.height = height
        self.fps = fps
    else:
        info = ffprobe(filename)['streams'][0]
        frame_count = int(info['nb_frames'])
        self.frame_count = frame_count
        width = int(info['width'])
        height = int(info['height'])
        self.width = width
        self.height = height
        fps = Fraction(info['avg_frame_rate'])
        self.fps = float(fps)

    self.queue = Queue()

    if frame_range is not None:
        frame_index_start, frame_index_end = frame_range
        if frame_index_end == -1:
            frame_index_end = frame_count
        if frame_index_start < 0:
            print(f"WARNING! Invalid frame_range {frame_range}. "
                  f"frame_index_start can't be less than 0. Using default value of 0 instead.")
            frame_index_start = 0
        if frame_index_end > frame_count:
            print(f"WARNING! Invalid frame_range {frame_range}. "
                  f"frame_index_end can't be greater than {frame_count}. "
                  f"Using default value of {frame_count} instead.")
            frame_index_end = frame_count
    else:
        frame_index_start = 0
        frame_index_end = frame_count

    ss = str(float(frame_index_start / fps))
    # to = str(float(frame_index_end / fps))
    self._frame_index_end = frame_index_end
    self._frame_index_start = frame_index_start

    if process_nth_frames:
        fps /= process_nth_frames

    if min_size is not None:
        if width < height:
            scale = (min_size, int(round(height / (width / min_size))))
        else:
            scale = (int(round(width / (height / min_size))), min_size)
    else:
        scale = (width, height)

    decoder = image2pipe.images_from_url(self.queue, filename, ss=ss, fps=fps,
                                         scale=scale, buffer_size=scale)
    decoder.start()
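# For context, a minimal sketch of how frames might be pulled from self.queue
# after construction. "VideoReader" is an assumed name for the surrounding
# class (it is not shown above), and process() is a placeholder:
from image2pipe.utils import yield_from_queue

reader = VideoReader("input.mp4", frame_range=(0, 100), min_size=360)
for frame_number, frame in yield_from_queue(reader.queue):
    process(frame)  # placeholder for per-frame work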
    if invert:
        temp = cv2.bitwise_not(temp)
    return temp


def crop(image, coordinates):
    x_start = coordinates[2]
    x_end = x_start + coordinates[0]
    y_start = coordinates[3]
    y_end = y_start + coordinates[1]
    return image[y_start:y_end, x_start:x_end]


q = Queue()
decoder = image2pipe.images_from_url(q, sys.argv[1], fps=sys.argv[2], scale=(1920, 1080))
decoder.start()

with open(sys.argv[3], "a") as out:
    sys.stdout = out
    for frame_number, image in image2pipe.utils.yield_from_queue(q):
        tesseract = tesserpy.Tesseract("/usr/share/tesseract-ocr/tessdata",
                                       language="eng", oem=tesserpy.OEM_TESSERACT_ONLY)
        tesseract.tessedit_pageseg_mode = tesserpy.PSM_SINGLE_LINE
        frametime = datetime.utcnow().isoformat()
        print(frametime + " #" + str(frame_number), end=' ')
        for datum, settings in crops.items():
            cropped = crop(image, settings['coords'])
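# The crops mapping iterated above is defined elsewhere. Given how crop()
# indexes its coordinates tuple as (width, height, x, y), it presumably looks
# something like this (an assumed example, not the original config):
crops = {
    "timestamp": {"coords": (320, 32, 0, 0)},      # width, height, x, y
    "station_id": {"coords": (200, 32, 1720, 0)},  # width, height, x, y
}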
# if args.read_path is None or args.save_path is None:
#     parser.print_usage()
#     sys.exit(1)

# use CUDA?
args.cuda = not args.no_gpu and torch.cuda.is_available()

frames_q = None
if args.read_path.find("://") > -1:
    print("Decoding input video %s" % args.read_path)
    frames_q = Queue(maxsize=NUM_IN_FR_EXT * 5)
    probe = image2pipe.ffprobe(args.read_path)
    decoder = image2pipe.images_from_url(frames_q, args.read_path, scale=None)
    decoder.start()

# If save_path does not exist, create it
if not os.path.exists(args.save_path):
    os.makedirs(args.save_path)

# Select the torch device according to CPU or GPU mode
if args.cuda:
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# Create models
print('Loading model ...')
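# Downstream, the decoded frames would typically be drained from frames_q.
# A minimal assumed sketch (the original loop is not shown; the tensor layout
# and normalization are illustrative only):
from image2pipe.utils import yield_from_queue

for frame_number, frame in yield_from_queue(frames_q):
    # HWC uint8 frame -> NCHW float tensor on the selected device
    batch = torch.from_numpy(frame).permute(2, 0, 1).unsqueeze(0).float().to(device) / 255.0
    # ... run the model on `batch` ...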