コード例 #1(ファイル: detector_mtcnn.py、プロジェクト: VeryCompany/BVPS)
    def run(self):
        """Worker-process main loop: pull frames, run MTCNN detection,
        forward detected humans downstream, and periodically log
        throughput/latency statistics.

        Runs forever; exceptions are logged with a full traceback and the
        loop keeps consuming (best-effort processing).
        """
        log.info("ready to startup camera:{}'s' mtcnn detector".format(
            self.camera.cameraId))
        # Build the MTCNN detector bound to the configured GPU.
        self.mtcnn_detector = test_net(self.gpuId)

        log.info(
            "camera:{}'s' mtcnn detector successfully startup......".format(
                self.camera.cameraId))
        log.info(self.mtcnn_detector)
        # Rolling accumulators for the periodic statistics report below;
        # all four are reset to 0 after each report.
        frame_stat = StatValue()
        humans_stat = StatValue()
        frame_interval_stat = StatValue()
        humans_latency_stat = StatValue()
        last_info_time = clock()
        stat_time = 30  # reporting interval in seconds (original comment said "once a minute", but the value is 30)
        while True:
            try:
                frame, t0, secs = DetectorProcessor.frame_in.get()
                humans = self.detect_humans(frame, t0, secs)
                frame_stat.update(frame_stat.value + 1)
                for human in humans:
                    if not DetectorProcessor.frame_out2.full():
                        DetectorProcessor.frame_out2.put(human)  # for the recognizer
                        humans_stat.update(humans_stat.value + 1)
                    if self.camera.cameraType == CameraType.CAPTURE:
                        if not DetectorProcessor.frame_out.full():
                            DetectorProcessor.frame_out.put(
                                human)  # for the Trainor (training path)
                tn = clock()
                self.frame_interval.update(tn - self.last_frame_time)
                self.latency.update(tn - t0)
                frame_interval_stat.update(frame_interval_stat.value +
                                           self.frame_interval.value)
                humans_latency_stat.update(humans_latency_stat.value +
                                           self.latency.value)
                if tn - last_info_time > stat_time:
                    time_cost = tn - last_info_time
                    last_info_time = tn
                    # NOTE(review): every value is divided by time_cost, so
                    # the "avg latency" field is latency-sum per second, not
                    # per frame — confirm this is the intended metric.
                    log.info(
                        "MTCNN->pid:{},{},平均帧率:{:0.1f}/s,平均延迟:{:0.1f}s,处理frame:{:0.1f}/s,处理人脸:{:0.1f}/s"
                        .format(self.pid, self.camera.cameraId,
                                frame_stat.value / time_cost,
                                humans_latency_stat.value / time_cost,
                                frame_interval_stat.value / time_cost,
                                humans_stat.value / time_cost))
                    # Reset accumulators for the next reporting window.
                    frame_stat.update(0)
                    humans_latency_stat.update(0)
                    frame_interval_stat.update(0)
                    humans_stat.update(0)
                self.last_frame_time = tn
            except Exception as e:
                # Best-effort loop: log the traceback and keep consuming.
                exc_type, exc_value, exc_traceback = sys.exc_info()
                log.error(
                    traceback.format_exception(exc_type, exc_value,
                                               exc_traceback))
コード例 #2(ファイル: detectorthread.py、プロジェクト: VeryCompany/BVPS)
 def upperBodyDetector(self, image):
     """Detect upper bodies in *image* and return the cropped regions.

     Equalizes a grayscale copy of the frame, runs the Haar upper-body
     cascade, draws the detections onto *image* (in-place side effect),
     and overlays the detection time in milliseconds.

     :param image: BGR frame (modified in place by the drawing calls).
     :return: list of ROI sub-arrays, one per detected upper body.
     """
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
     gray = cv2.equalizeHist(gray)  # improve contrast before detection
     t = clock()
     rects = self.detect(gray, self.upperBodyClassifier)
     self.draw_detections(image, rects, thickness=1)
     # Crop each detected rectangle out of the original (color) frame.
     # (Fix: the original loop ended in a no-op `continue`; replaced the
     # manual append loop with an equivalent comprehension.)
     uppers = [image[y1:y2, x1:x2] for x1, y1, x2, y2 in rects]
     dt = clock() - t
     draw_str(image, (20, 20), 'time: %.1f ms' % (dt * 1000))
     return uppers
コード例 #3(ファイル: pre_processor.py、プロジェクト: VeryCompany/BVPS)
 def __init__(self, camera, frame_in, frame_out):
     """Set up the pre-processor worker process and its shared queues."""
     multiprocessing.Process.__init__(self, name="video_frame_PreProcessor")
     # Queues live on the class so the spawned process can reach them.
     PreProcessor.frame_in = frame_in
     PreProcessor.frame_out = frame_out
     self.camera = camera
     # Timing stats: end-to-end latency and spacing between frames.
     self.latency = StatValue()
     self.frame_interval = StatValue()
     self.last_frame_time = clock()
コード例 #4(ファイル: detector_mtcnn.py、プロジェクト: VeryCompany/BVPS)
 def __init__(self, camera, frame_in, frame_out, frame_out_2, gpuId):
     """Set up the human-detector worker process and its I/O queues."""
     multiprocessing.Process.__init__(self, name="video_human_detector")
     # Class-level queue handles, shared with the spawned process.
     DetectorProcessor.frame_in = frame_in
     DetectorProcessor.frame_out = frame_out
     DetectorProcessor.frame_out2 = frame_out_2
     self.camera = camera
     self.gpuId = gpuId
     # Timing statistics updated by the detection loop.
     self.latency = StatValue()
     self.frame_interval = StatValue()
     self.last_frame_time = clock()
コード例 #5(ファイル: cameraServer.py、プロジェクト: VeryCompany/BVPS)
 def recognizeParallel(self, method, humans):
     """Run *method* over *humans* on a thread pool to speed things up.

     The pool is tracked in ``self.pools`` (keyed by start time) while it
     runs, then removed. NOTE(review): this returns the ``AsyncResult``
     from ``map_async`` — callers presumably call ``.get()``; confirm.
     """
     key = clock()
     workers = ThreadPool(processes=self.threadn)
     self.pools[key] = workers
     async_result = workers.map_async(method, humans)
     workers.close()
     workers.join()
     workers.terminate()
     self.pools.pop(key, None)
     return async_result
コード例 #6(ファイル: cameraServer.py、プロジェクト: VeryCompany/BVPS)
 def run(self):
     """Consume frames from the server queue and process them forever.

     Also starts the training server that feeds on the trainor queue.
     Per-frame processing latency is logged at debug level.
     """
     lag = StatValue()
     trainer = TrainingServer(self.trainor_queue, self.svm_queue)
     trainer.start()
     while True:
         frame, t0, secs, start, end, uid = CameraServer.queue.get()
         self.process_frame(frame, t0, secs, start, end, uid)
         lag.update(clock() - t0)
         log.debug("摄像头[{}]进程{}处理数据,处理耗时{:0.1f}ms...".format(
             self.cmd.cameraName, self.pid, lag.value * 1000))
コード例 #7(ファイル: detectorthread.py、プロジェクト: VeryCompany/BVPS)
 def __init__(self, *args, **kw):
     """Load the HOG people detector and the Haar cascade classifiers
     used by the detection thread, and initialize timing statistics."""
     self.hog = cv2.HOGDescriptor()
     self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
     # NOTE(review): bodyClassifier loads the *upperbody* cascade, the same
     # file as upperBodyClassifier below — possibly meant to be
     # haarcascade_fullbody.xml; confirm before changing.
     self.bodyClassifier = cv2.CascadeClassifier(
         os.path.join(harrsDir, "haarcascade_upperbody.xml"))
     self.faceClassifier = cv2.CascadeClassifier(
         os.path.join(harrsDir, "haarcascade_frontalface_alt.xml"))
     self.upperBodyClassifier = cv2.CascadeClassifier(
         os.path.join(harrsDir, "haarcascade_upperbody.xml"))
     self.HumanRecognizerProcessors = []
     # Timing statistics shared with the detection loop.
     self.frame_interval = StatValue()
     self.last_frame_time = clock()
     self.latency = StatValue()
コード例 #8(ファイル: camera.py、プロジェクト: VeryCompany/BVPS)
 def receiveMsg_TrainingCMD(self, cmd, sender):
     """Actor message handler for training commands.

     Dispatches on ``cmd.cctype``: forwards TRAINOR_START to the training
     dataset queue, records capture completion, or installs an updated
     SVM model.
     """
     if cmd.cctype == CameraCmdType.TRAINOR_START:
         # self.training_start_time = int(cmd.msg)
         # self.training_end_time = int(cmd.msg + 10)
         # self.training_uid = cmd.uid
         # Forward the start command (with the current clock) to the trainer.
         self.training_dset_q.put(
             TrainingCMD(CameraCmdType.TRAINOR_START, clock(), cmd.uid))
         log.info("用户{},时间{}".format(cmd.uid, cmd.msg))
     elif cmd.cctype == CameraCmdType.TRAINOR_CAPTURE_OK:
         # Capture finished: clear the training window, keep the user id.
         self.training_start_time, self.training_end_time = None, None
         self.training_uid = cmd.uid
     elif cmd.cctype == CameraCmdType.MODEL_UPDATED:
         # A new SVM model arrived; flag it so consumers reload it.
         self.svm_model = cmd.msg
         self.svm_model_updated = True
コード例 #9(ファイル: pre_processor.py、プロジェクト: VeryCompany/BVPS)
 def run(self):
     """Resize incoming frames asynchronously and forward them downstream.

     Uses a thread pool with a bounded ``pending`` deque so that frame
     order is preserved: finished resize tasks are drained from the front
     of the deque before new frames are submitted.
     """
     from bvps.config import cameras as ca
     scale = ca[self.camera.cameraId]["scale"]
     threadn = cv2.getNumberOfCPUs()
     pool = ThreadPool(processes=threadn * 2)
     pending = deque()  # FIFO of in-flight resize tasks
     while True:
         # Drain completed tasks in submission order; drop the result if
         # the downstream queue is full (best-effort delivery).
         while len(pending) > 0 and pending[0].ready():
             frame, t0, ts = pending.popleft().get()
             if not PreProcessor.frame_out.full():
                 PreProcessor.frame_out.put((frame, t0, ts))
         # Keep at most ``threadn`` tasks in flight (pool has 2x workers).
         if len(pending) < threadn:
             frame, t0, ts = PreProcessor.frame_in.get()
             task = pool.apply_async(self.resize_frame,
                                     (frame, scale, t0, ts))
             pending.append(task)
             t = clock()
             self.latency.update(t - t0)
             self.frame_interval.update(t - self.last_frame_time)
             self.last_frame_time = t
コード例 #10(ファイル: detectorthread.py、プロジェクト: VeryCompany/BVPS)
 def detect_humans(self, cameraName, image, t0, secs):
     """Run face detection on *image* and wrap each hit as a candidate.

     :param cameraName: camera identifier (currently unused here).
     :param image: frame to scan.
     :param t0: capture timestamp, carried through to the result tuples.
     :param secs: wall-clock capture time, carried through as well.
     :return: list of ``(face, face, t0, secs)`` tuples, one per face.

     Side effects: increments ``self.num`` and updates the latency /
     frame-interval statistics.
     """
     detected = self.faceDetector(image)
     candidates = [(face, face, t0, secs) for face in detected]
     self.num += 1
     now = clock()
     self.latency.update(now - t0)
     self.frame_interval.update(now - self.last_frame_time)
     self.last_frame_time = now
     return candidates
コード例 #11(ファイル: camera.py、プロジェクト: VeryCompany/BVPS)
    def startCapture(self):
        """Open the camera device, apply the configured properties, and
        pump captured frames into the pre-process and frame queues.

        Frames are dispatched through a thread pool; a bounded ``pending``
        deque keeps at most one task per CPU in flight. Runs until the
        process dies; per-iteration errors are logged with a traceback and
        the loop continues.
        """
        try:
            video = cv2.VideoCapture(self.cameraDevice)
            log.info("摄像头{}初始化参数".format(self.cameraName))
            # Apply every configured capture property to the device.
            for k, v in self.initCmd.values["video_properties"].items():
                video.set(k, v)
                log.info("video.set({},{})".format(k, v))
            # Optional four-character codec code from the init command.
            forcc = self.initCmd.values[
                "fourcc"] if "fourcc" in self.initCmd.values else None
            if forcc is not None:
                video.set(
                    cv2.CAP_PROP_FOURCC,
                    cv2.VideoWriter_fourcc(forcc[0], forcc[1], forcc[2],
                                           forcc[3]))
            width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
            codec = video.get(cv2.CAP_PROP_FOURCC)
            self.resolution = (width, height)
            # Log the effective capture parameters for diagnostics.
            log.info("摄像头fps[{}] width:{} height:{} codec:{}".format(
                video.get(cv2.CAP_PROP_FPS), width, height, codec))
            log.info("亮度:{}".format(video.get(cv2.CAP_PROP_BRIGHTNESS)))
            log.info("对比度:{}".format(video.get(cv2.CAP_PROP_CONTRAST)))
            log.info("饱和度:{}".format(video.get(cv2.CAP_PROP_SATURATION)))
            log.info("色调:{}".format(video.get(cv2.CAP_PROP_HUE)))
            log.info("图像增益:{}".format(video.get(cv2.CAP_PROP_GAIN)))
            log.info("曝光:{}".format(video.get(cv2.CAP_PROP_EXPOSURE)))
            log.info("ISO:{}".format(video.get(cv2.CAP_PROP_ISO_SPEED)))
            log.info("RGB?:{}".format(video.get(cv2.CAP_PROP_CONVERT_RGB)))
            """
            # 处理方式 1
            frame_interval = StatValue()
            last_frame_time = clock()
            num = 10
            while True:
                try:
                    if self.stopped():
                        break
                    if video.grab():
                        ret, frame = video.retrieve()
                        frame_time = time.time()
                        if num % 50 == 0:
                            log.debug("读取摄像头{}frame{}".format(
                                self.cameraName, "成功" if ret else "失败!"))
                        t = clock()
                        frame_interval.update(t - last_frame_time)
                        if num % 50 == 0:
                            log.debug("摄像头{}.当前fps:{}".format(
                                self.cameraName,
                                int(1000 / (frame_interval.value * 1000))))
                        if ret:
                            if not self.camera.pre_process_queue.full():
                                self.camera.pre_process_queue.put_nowait(
                                    (frame, t, frame_time))
                            if not self.camera.frame_queue.full():
                                self.camera.frame_queue.put_nowait((frame, t,
                                                                   frame_time))
                        last_frame_time = t
                    num += 1
                except Exception, e:
                    log.info(e.message)
            """
            """处理方式2"""
            def process_frame(frame, t0, fts):
                # Fan the frame out to both consumer queues (drop when full).
                # BUG FIX: previously this used the enclosing loop's `t`
                # (late-binding closure) instead of the `t0` argument, so
                # queued timestamps could belong to a later frame by the
                # time the pool ran this task.
                if not self.camera.pre_process_queue.full():
                    self.camera.pre_process_queue.put((frame, t0, fts))
                if not self.camera.frame_queue.full():
                    self.camera.frame_queue.put((frame, t0, fts))
                return frame, t0, fts

            threadn = cv2.getNumberOfCPUs()
            pool = ThreadPool(processes=threadn)
            pending = deque()  # FIFO of in-flight dispatch tasks

            latency = StatValue()
            frame_interval = StatValue()
            last_frame_time = clock()
            while True:
                try:
                    # Reap finished tasks in order and record their latency.
                    while len(pending) > 0 and pending[0].ready():
                        frame, t0, fts = pending.popleft().get()
                        latency.update(clock() - t0)
                    if len(pending) < threadn:
                        frame_time = time.time()
                        ret, frame = video.read()
                        t = clock()
                        frame_interval.update(t - last_frame_time)
                        last_frame_time = t
                        if ret:
                            # Copy the frame: the capture may reuse buffers.
                            task = pool.apply_async(
                                process_frame, (frame.copy(), t, frame_time))
                            pending.append(task)
                except Exception as e:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    log.error(
                        traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback))

        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.error(
                traceback.format_exception(exc_type, exc_value, exc_traceback))