def queue_monitor(self):
    """Background loop: sample the depth of the four work queues once per
    second and log the average backlog over each window of samples.

    Runs forever; intended to be started as a daemon thread/process.
    Reads ``self.pre_process_queue``, ``self.human_detector_q``,
    ``self.training_dset_q`` and ``self.recognizer_in_q``.

    BUGFIX: the original reset its counter to 1 *and then* incremented it
    after logging, so every window after the first accumulated 10 samples
    but divided by 11, under-reporting the averages. We now count samples
    explicitly and divide by the actual sample count.
    """
    pre_queue_stat = StatValue()
    human_detector_q_stat = StatValue()
    training_dset_q_stat = StatValue()
    recognizer_in_q_stat = StatValue()
    sample_count = 0  # samples accumulated in the current window
    window = 10       # samples per reporting window (~10 seconds)
    while True:
        # Accumulate one snapshot of each queue's current depth.
        # NOTE: qsize() is approximate on multiprocessing queues — fine
        # for monitoring purposes.
        pre_queue_stat.update(pre_queue_stat.value +
                              self.pre_process_queue.qsize())
        human_detector_q_stat.update(human_detector_q_stat.value +
                                     self.human_detector_q.qsize())
        training_dset_q_stat.update(training_dset_q_stat.value +
                                    self.training_dset_q.qsize())
        recognizer_in_q_stat.update(recognizer_in_q_stat.value +
                                    self.recognizer_in_q.qsize())
        sample_count += 1
        if sample_count >= window:
            # Log the mean backlog per queue over this window, then reset.
            log.info(
                "{}预处理待处理堆积{:0.1f},探测器待处理堆积{:0.1f},训练器待处理堆积{:0.1f},识别器待处理堆积{:0.1f}"
                .format(self.cameraId,
                        pre_queue_stat.value / sample_count,
                        human_detector_q_stat.value / sample_count,
                        training_dset_q_stat.value / sample_count,
                        recognizer_in_q_stat.value / sample_count))
            sample_count = 0
            pre_queue_stat.update(0)
            human_detector_q_stat.update(0)
            training_dset_q_stat.update(0)
            recognizer_in_q_stat.update(0)
        time.sleep(1)
def __init__(self, camera, frame_in, frame_out):
    """Set up the frame pre-processor worker process.

    The input/output queues are deliberately stored as *class* attributes
    so the forked worker (``run``) and the parent share the same handles.
    """
    multiprocessing.Process.__init__(self, name="video_frame_PreProcessor")
    # Shared queues (class-level on purpose — see docstring).
    PreProcessor.frame_in = frame_in
    PreProcessor.frame_out = frame_out
    # Per-instance state.
    self.camera = camera
    self.frame_interval = StatValue()  # rolling inter-frame gap
    self.latency = StatValue()         # rolling processing latency
    self.last_frame_time = clock()
def __init__(self, camera, frame_in, frame_out, frame_out_2, gpuId):
    """Set up the human-detector worker process.

    Queues are stored as *class* attributes so the forked worker
    (``run``) shares them with the parent process.
    """
    multiprocessing.Process.__init__(self, name="video_human_detector")
    # Shared queues (class-level on purpose).
    DetectorProcessor.frame_in = frame_in
    DetectorProcessor.frame_out = frame_out     # -> trainer
    DetectorProcessor.frame_out2 = frame_out_2  # -> recognizer
    # Per-instance state.
    self.camera = camera
    self.gpuId = gpuId
    self.frame_interval = StatValue()  # rolling inter-frame gap
    self.latency = StatValue()         # rolling processing latency
    self.last_frame_time = clock()
def run(self):
    """Worker loop: pull frames from ``frame_in``, run MTCNN human/face
    detection, and fan detections out to the recognizer (and, for capture
    cameras, the trainer) queues. Logs throughput stats every ``stat_time``
    seconds. Runs forever; exceptions are logged and the loop continues.
    """
    log.info("ready to startup camera:{}'s' mtcnn detector".format(
        self.camera.cameraId))
    # Build the MTCNN detector on the configured GPU.
    self.mtcnn_detector = test_net(self.gpuId)
    log.info(
        "camera:{}'s' mtcnn detector successfully startup......".format(
            self.camera.cameraId))
    log.info(self.mtcnn_detector)
    # Window accumulators, reset after each stats log line.
    frame_stat = StatValue()
    humans_stat = StatValue()
    frame_interval_stat = StatValue()
    humans_latency_stat = StatValue()
    last_info_time = clock()
    stat_time = 30  # stats window in seconds (original comment said
                    # "once per minute" but the value is 30 — verify)
    while True:
        try:
            frame, t0, secs = DetectorProcessor.frame_in.get()
            humans = self.detect_humans(frame, t0, secs)
            frame_stat.update(frame_stat.value + 1)
            for human in humans:
                # Drop-on-full policy: detections are discarded when a
                # consumer queue is saturated.
                if not DetectorProcessor.frame_out2.full():
                    DetectorProcessor.frame_out2.put(human)  # for recognizer
                    humans_stat.update(humans_stat.value + 1)
                    if self.camera.cameraType == CameraType.CAPTURE:
                        if not DetectorProcessor.frame_out.full():
                            DetectorProcessor.frame_out.put(
                                human)  # for Trainor
            tn = clock()
            self.frame_interval.update(tn - self.last_frame_time)
            self.latency.update(tn - t0)
            frame_interval_stat.update(frame_interval_stat.value +
                                       self.frame_interval.value)
            humans_latency_stat.update(humans_latency_stat.value +
                                       self.latency.value)
            if tn - last_info_time > stat_time:
                time_cost = tn - last_info_time
                last_info_time = tn
                # NOTE(review): the labels and arguments here look
                # misaligned — "平均延迟" (avg latency) receives the latency
                # *sum divided by wall time* rather than by frame count,
                # and "处理frame" receives the interval sum / time. Confirm
                # intended semantics before relying on these numbers.
                log.info(
                    "MTCNN->pid:{},{},平均帧率:{:0.1f}/s,平均延迟:{:0.1f}s,处理frame:{:0.1f}/s,处理人脸:{:0.1f}/s"
                    .format(self.pid, self.camera.cameraId,
                            frame_stat.value / time_cost,
                            humans_latency_stat.value / time_cost,
                            frame_interval_stat.value / time_cost,
                            humans_stat.value / time_cost))
                # Reset the window accumulators.
                frame_stat.update(0)
                humans_latency_stat.update(0)
                frame_interval_stat.update(0)
                humans_stat.update(0)
            self.last_frame_time = tn
        except Exception as e:
            # Best-effort loop: log the full traceback and keep consuming.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.error(
                traceback.format_exception(exc_type, exc_value,
                                           exc_traceback))
def __init__(self, *args, **kw):
    """Build the OpenCV people/face detectors and timing state.

    NOTE(review): this ``__init__`` never calls a parent initializer
    despite accepting ``*args, **kw`` — confirm that is intentional.
    """
    def _load_cascade(xml_name):
        # Helper: build a Haar cascade classifier from the shared dir.
        return cv2.CascadeClassifier(os.path.join(harrsDir, xml_name))

    # HOG pedestrian detector with OpenCV's default people SVM.
    self.hog = cv2.HOGDescriptor()
    self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    # NOTE(review): bodyClassifier and upperBodyClassifier load the SAME
    # cascade ("haarcascade_upperbody.xml"); bodyClassifier may have been
    # meant to use haarcascade_fullbody.xml — confirm before changing.
    self.bodyClassifier = _load_cascade("haarcascade_upperbody.xml")
    self.faceClassifier = _load_cascade("haarcascade_frontalface_alt.xml")
    self.upperBodyClassifier = _load_cascade("haarcascade_upperbody.xml")
    self.HumanRecognizerProcessors = []
    # Timing statistics.
    self.frame_interval = StatValue()
    self.last_frame_time = clock()
    self.latency = StatValue()
def run(self):
    """Main loop: start the training server, then consume items from the
    shared camera queue forever, logging per-item processing latency.
    """
    latency = StatValue()
    # Spin up the training server alongside this consumer.
    training_server = TrainingServer(self.trainor_queue, self.svm_queue)
    training_server.start()
    while True:
        # Blocking pull from the class-level shared queue.
        frame, t0, secs, start, end, uid = CameraServer.queue.get()
        self.process_frame(frame, t0, secs, start, end, uid)
        now = clock()
        latency.update(now - t0)
        log.debug("摄像头[{}]进程{}处理数据,处理耗时{:0.1f}ms...".format(
            self.cmd.cameraName, self.pid, latency.value * 1000))
def startCapture(self):
    """Open the camera device, apply configured properties, log its
    parameters, then capture frames forever, fanning each frame out to
    the pre-process and frame queues via a thread pool.

    Changes from the original:
    - BUGFIX: the inner ``process_frame`` enqueued the *closure* variable
      ``t`` (the most recent capture time, racy under the thread pool)
      instead of its own timestamp parameter ``t0``. It now uses ``t0``.
    - Removed a large block of commented-out (string-literal) Python-2
      era dead code ("处理方式 1").
    """
    try:
        video = cv2.VideoCapture(self.cameraDevice)
        log.info("摄像头{}初始化参数".format(self.cameraName))
        # Apply every configured capture property.
        for k, v in self.initCmd.values["video_properties"].items():
            video.set(k, v)
            log.info("video.set({},{})".format(k, v))
        # Optional FOURCC codec override (a 4-character string).
        forcc = self.initCmd.values[
            "fourcc"] if "fourcc" in self.initCmd.values else None
        if forcc is not None:
            video.set(
                cv2.CAP_PROP_FOURCC,
                cv2.VideoWriter_fourcc(forcc[0], forcc[1], forcc[2],
                                       forcc[3]))
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        codec = video.get(cv2.CAP_PROP_FOURCC)
        self.resolution = (width, height)
        # Dump the effective camera parameters for diagnostics.
        log.info("摄像头fps[{}] width:{} height:{} codec:{}".format(
            video.get(cv2.CAP_PROP_FPS), width, height, codec))
        log.info("亮度:{}".format(video.get(cv2.CAP_PROP_BRIGHTNESS)))
        log.info("对比度:{}".format(video.get(cv2.CAP_PROP_CONTRAST)))
        log.info("饱和度:{}".format(video.get(cv2.CAP_PROP_SATURATION)))
        log.info("色调:{}".format(video.get(cv2.CAP_PROP_HUE)))
        log.info("图像增益:{}".format(video.get(cv2.CAP_PROP_GAIN)))
        log.info("曝光:{}".format(video.get(cv2.CAP_PROP_EXPOSURE)))
        log.info("ISO:{}".format(video.get(cv2.CAP_PROP_ISO_SPEED)))
        log.info("RGB?:{}".format(video.get(cv2.CAP_PROP_CONVERT_RGB)))

        def process_frame(frame, t0, fts):
            # Fan this frame out to both consumer queues; drop when full.
            # BUGFIX: use this task's own timestamp t0, not the enclosing
            # loop's `t` (which may already refer to a newer frame).
            if not self.camera.pre_process_queue.full():
                self.camera.pre_process_queue.put((frame, t0, fts))
            if not self.camera.frame_queue.full():
                self.camera.frame_queue.put((frame, t0, fts))
            return frame, t0, fts

        # One worker thread per CPU; at most `threadn` frames in flight.
        threadn = cv2.getNumberOfCPUs()
        pool = ThreadPool(processes=threadn)
        pending = deque()
        latency = StatValue()
        frame_interval = StatValue()
        last_frame_time = clock()
        # NOTE(review): this loop has no stop condition and busy-spins
        # when the pipeline is saturated — consider a stop flag / sleep.
        while True:
            try:
                # Drain completed tasks (in submission order) and record
                # their end-to-end latency.
                while len(pending) > 0 and pending[0].ready():
                    frame, t0, fts = pending.popleft().get()
                    latency.update(clock() - t0)
                if len(pending) < threadn:
                    frame_time = time.time()
                    ret, frame = video.read()
                    t = clock()
                    frame_interval.update(t - last_frame_time)
                    last_frame_time = t
                    if ret:
                        # Copy: the capture buffer may be reused by read().
                        task = pool.apply_async(
                            process_frame, (frame.copy(), t, frame_time))
                        pending.append(task)
            except Exception as e:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                log.error(
                    traceback.format_exception(exc_type, exc_value,
                                               exc_traceback))
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        log.error(
            traceback.format_exception(exc_type, exc_value, exc_traceback))