Example #1
def run(self):
    latency = StatValue()
    training_server = TrainingServer(self.trainor_queue, self.svm_queue)
    training_server.start()
    while True:
        frame, t0, secs, start, end, uid = CameraServer.queue.get()
        self.process_frame(frame, t0, secs, start, end, uid)
        t = clock()
        latency.update(t - t0)
        log.debug("camera[{}] process {} handled a frame in {:0.1f}ms...".format(
            self.cmd.cameraName, self.pid, latency.value * 1000))
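Every snippet in this listing leans on a StatValue helper that the listing itself never defines. A minimal sketch consistent with how it is used here (update() stores a number, .value reads it back; counters do update(x.value + 1) and reset with update(0)) might be:

class StatValue(object):
    # Minimal sketch, assuming update() is a plain setter. OpenCV's
    # samples/python/video_threaded.py ships a smoothed variant of this
    # helper; a plain setter is what the counter-style usage above implies.
    def __init__(self):
        self.value = 0.0

    def update(self, v):
        self.value = v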
Example #2
class PreProcessor(multiprocessing.Process):
    def __init__(self, camera, frame_in, frame_out):
        multiprocessing.Process.__init__(self, name="video_frame_PreProcessor")
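        # Note: the queues are bound to the class, not the instance, so every
        # PreProcessor object in this process shares the same pair.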
        PreProcessor.frame_in = frame_in
        PreProcessor.frame_out = frame_out
        self.camera = camera
        self.frame_interval = StatValue()
        self.last_frame_time = clock()
        self.latency = StatValue()

    def run(self):
        from bvps.config import cameras as ca
        scale = ca[self.camera.cameraId]["scale"]
        threadn = cv2.getNumberOfCPUs()
        pool = ThreadPool(processes=threadn * 2)
        pending = deque()
        while True:
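            # Harvest finished resize tasks in submission order and forward
            # the results, dropping a frame whenever the output queue is full.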
            while len(pending) > 0 and pending[0].ready():
                frame, t0, ts = pending.popleft().get()
                if not PreProcessor.frame_out.full():
                    PreProcessor.frame_out.put((frame, t0, ts))
            if len(pending) < threadn:
                frame, t0, ts = PreProcessor.frame_in.get()
                task = pool.apply_async(self.resize_frame,
                                        (frame, scale, t0, ts))
                pending.append(task)
                t = clock()
                self.latency.update(t - t0)
                self.frame_interval.update(t - self.last_frame_time)
                self.last_frame_time = t
            # log.debug("{},latency:{:0.1f}ms,process time:{:0.1f}ms".format(
            #     self.camera.cameraId, self.latency.value * 1000,
            #     self.frame_interval.value * 1000))

    def resize_frame(self, frame, scale, t0, ts):
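        # Downscale the frame by `scale`; a scale of 1 is a pass-through.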
        h, w, d = frame.shape
        if scale == 1:
            return (frame, t0, ts)
        f = cv2.resize(frame, (int(w * scale), int(h * scale)))
        return (f, t0, ts)
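A minimal wiring sketch for the class above (the `camera` object and the queue sizes are hypothetical; frame_in/frame_out are the multiprocessing queues the constructor expects):

import multiprocessing

frame_in = multiprocessing.Queue(maxsize=32)
frame_out = multiprocessing.Queue(maxsize=32)
pre = PreProcessor(camera, frame_in, frame_out)  # `camera` comes from the host app
pre.daemon = True  # exit together with the parent process
pre.start()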
Example #3
class DetectorProcessor(multiprocessing.Process):
    def __init__(self, camera, frame_in, frame_out, frame_out_2, gpuId):
        multiprocessing.Process.__init__(self, name="video_human_detector")
        DetectorProcessor.frame_in = frame_in
        DetectorProcessor.frame_out = frame_out
        DetectorProcessor.frame_out2 = frame_out_2
        self.camera = camera
        self.frame_interval = StatValue()
        self.last_frame_time = clock()
        self.latency = StatValue()
        self.gpuId = gpuId

    def run(self):
        log.info("ready to startup camera:{}'s' mtcnn detector".format(
            self.camera.cameraId))
        self.mtcnn_detector = test_net(self.gpuId)

        log.info(
            "camera:{}'s' mtcnn detector successfully startup......".format(
                self.camera.cameraId))
        log.info(self.mtcnn_detector)
        frame_stat = StatValue()
        humans_stat = StatValue()
        frame_interval_stat = StatValue()
        humans_latency_stat = StatValue()
        last_info_time = clock()
        stat_time = 30  # report averages every 30 seconds
        while True:
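            # Per-frame bookkeeping below; averages are logged and the
            # accumulators reset every stat_time seconds.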
            try:
                frame, t0, secs = DetectorProcessor.frame_in.get()
                humans = self.detect_humans(frame, t0, secs)
                frame_stat.update(frame_stat.value + 1)
                for human in humans:
                    if not DetectorProcessor.frame_out2.full():
                        DetectorProcessor.frame_out2.put(human)  # for the recognizer
                        humans_stat.update(humans_stat.value + 1)
                    if self.camera.cameraType == CameraType.CAPTURE:
                        if not DetectorProcessor.frame_out.full():
                            DetectorProcessor.frame_out.put(
                                human)  # for Trainor
                tn = clock()
                self.frame_interval.update(tn - self.last_frame_time)
                self.latency.update(tn - t0)
                frame_interval_stat.update(frame_interval_stat.value +
                                           self.frame_interval.value)
                humans_latency_stat.update(humans_latency_stat.value +
                                           self.latency.value)
                if tn - last_info_time > stat_time:
                    time_cost = tn - last_info_time
                    last_info_time = tn
                    log.info(
                        "MTCNN->pid:{},{},平均帧率:{:0.1f}/s,平均延迟:{:0.1f}s,处理frame:{:0.1f}/s,处理人脸:{:0.1f}/s"
                        .format(self.pid, self.camera.cameraId,
                                frame_stat.value / time_cost,
                                humans_latency_stat.value / time_cost,
                                frame_interval_stat.value / time_cost,
                                humans_stat.value / time_cost))
                    frame_stat.update(0)
                    humans_latency_stat.update(0)
                    frame_interval_stat.update(0)
                    humans_stat.update(0)
                self.last_frame_time = tn
            except Exception:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                log.error("".join(
                    traceback.format_exception(exc_type, exc_value,
                                               exc_traceback)))

    def detect_humans(self, image, t0, secs):
        validHuman = []
        try:
            if self.mtcnn_detector is None:
                log.error("mtcnn error!")
                return validHuman
            # log.info("image.shape:{}".format(image.shape))
            boxes, boxes_c = self.mtcnn_detector.detect_pnet(image)
            boxes, boxes_c = self.mtcnn_detector.detect_rnet(image, boxes_c)
            boxes, boxes_c = self.mtcnn_detector.detect_onet(image, boxes_c)

            if boxes_c is not None:
                #log.info("{} detected!".format(self.camera.cameraId))
                for b in boxes_c:
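                    # b[0:4] holds the box corners (x1, y1, x2, y2); derive the
                    # box centre and half-width/half-height from them.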
                    # cv2.rectangle(draw, (int(b[0]), int(b[1])),
                    #              (int(b[2]), int(b[3])), (0, 255, 255), 1)
                    center_x, center_y = ((int(b[0]) +
                                           abs(int(b[0]) - int(b[2])) / 2),
                                          (int(b[1]) +
                                           abs(int(b[1]) - int(b[3])) / 2))
                    log.debug("{}->{}:{}".format(self.camera.cameraId,
                                                 center_x, center_y))
                    w, h = (abs(int(b[0]) - int(b[2])) / 2,
                            abs(int(b[1]) - int(b[3])) / 2)
                    log.debug("{}->w:{},h:{}".format(self.camera.cameraId, w,
                                                     h))
                    # crop image and resize....
                    face_img = image.copy()[int(b[1]):int(b[3]),
                                            int(b[0]):int(b[2])]
                    face_img = cv2.resize(face_img, (96, 96),
                                          interpolation=cv2.INTER_AREA)
                    validHuman.append(
                        (face_img, t0, secs, (center_x, center_y), (w, h)))
        except Exception as e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.error("".join(
                traceback.format_exception(exc_type, exc_value,
                                           exc_traceback)))
            log.error(e)
        finally:
            return validHuman
Example #4
    def startCapture(self):
        try:
            video = cv2.VideoCapture(self.cameraDevice)
            log.info("摄像头{}初始化参数".format(self.cameraName))
            for k, v in self.initCmd.values["video_properties"].items():
                video.set(k, v)
                log.info("video.set({},{})".format(k, v))
            fourcc = (self.initCmd.values["fourcc"]
                      if "fourcc" in self.initCmd.values else None)
            if fourcc is not None:
                video.set(
                    cv2.CAP_PROP_FOURCC,
                    cv2.VideoWriter_fourcc(fourcc[0], fourcc[1], fourcc[2],
                                           fourcc[3]))
            width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
            codec = video.get(cv2.CAP_PROP_FOURCC)
            self.resolution = (width, height)
            log.info("摄像头fps[{}] width:{} height:{} codec:{}".format(
                video.get(cv2.CAP_PROP_FPS), width, height, codec))
            log.info("亮度:{}".format(video.get(cv2.CAP_PROP_BRIGHTNESS)))
            log.info("对比度:{}".format(video.get(cv2.CAP_PROP_CONTRAST)))
            log.info("饱和度:{}".format(video.get(cv2.CAP_PROP_SATURATION)))
            log.info("色调:{}".format(video.get(cv2.CAP_PROP_HUE)))
            log.info("图像增益:{}".format(video.get(cv2.CAP_PROP_GAIN)))
            log.info("曝光:{}".format(video.get(cv2.CAP_PROP_EXPOSURE)))
            log.info("ISO:{}".format(video.get(cv2.CAP_PROP_ISO_SPEED)))
            log.info("RGB?:{}".format(video.get(cv2.CAP_PROP_CONVERT_RGB)))
            """
            # 处理方式 1
            frame_interval = StatValue()
            last_frame_time = clock()
            num = 10
            while True:
                try:
                    if self.stopped():
                        break
                    if video.grab():
                        ret, frame = video.retrieve()
                        frame_time = time.time()
                        if num % 50 == 0:
                            log.debug("读取摄像头{}frame{}".format(
                                self.cameraName, "成功" if ret else "失败!"))
                        t = clock()
                        frame_interval.update(t - last_frame_time)
                        if num % 50 == 0:
                            log.debug("摄像头{}.当前fps:{}".format(
                                self.cameraName,
                                int(1000 / (frame_interval.value * 1000))))
                        if ret:
                            if not self.camera.pre_process_queue.full():
                                self.camera.pre_process_queue.put_nowait(
                                    (frame, t, frame_time))
                            if not self.camera.frame_queue.full():
                                self.camera.frame_queue.put_nowait((frame, t,
                                                                   frame_time))
                        last_frame_time = t
                    num += 1
                except Exception, e:
                    log.info(e.message)
            """
            """处理方式2"""
            def process_frame(frame, t0, fts):
                # Fan the frame out to both downstream queues, dropping it
                # wherever a queue is already full.
                if not self.camera.pre_process_queue.full():
                    self.camera.pre_process_queue.put((frame, t0, fts))
                if not self.camera.frame_queue.full():
                    self.camera.frame_queue.put((frame, t0, fts))
                return frame, t0, fts

            threadn = cv2.getNumberOfCPUs()
            pool = ThreadPool(processes=threadn)
            pending = deque()

            latency = StatValue()
            frame_interval = StatValue()
            last_frame_time = clock()
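            # Same worker-pool pattern as PreProcessor.run above: harvest
            # finished tasks first, then read and dispatch a new frame while
            # the pool has spare capacity.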
            while True:
                try:
                    while len(pending) > 0 and pending[0].ready():
                        frame, t0, fts = pending.popleft().get()
                        latency.update(clock() - t0)
                    if len(pending) < threadn:
                        frame_time = time.time()
                        ret, frame = video.read()
                        t = clock()
                        frame_interval.update(t - last_frame_time)
                        last_frame_time = t
                        if ret:
                            task = pool.apply_async(
                                process_frame, (frame.copy(), t, frame_time))
                            pending.append(task)
                except Exception:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    log.error("".join(
                        traceback.format_exception(exc_type, exc_value,
                                                   exc_traceback)))

        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            log.error("".join(
                traceback.format_exception(exc_type, exc_value,
                                           exc_traceback)))
Example #5
def queue_monitor(self):
    pre_queue_stat = StatValue()
    human_detector_q_stat = StatValue()
    training_dset_q_stat = StatValue()
    recognizer_in_q_stat = StatValue()
    count_times = 1
    # Sample the queue depths once per second; when enough samples have
    # accumulated, log the average backlog per queue and reset.
    while True:
        pre_queue_stat.update(pre_queue_stat.value +
                              self.pre_process_queue.qsize())
        human_detector_q_stat.update(human_detector_q_stat.value +
                                     self.human_detector_q.qsize())
        training_dset_q_stat.update(training_dset_q_stat.value +
                                    self.training_dset_q.qsize())
        recognizer_in_q_stat.update(recognizer_in_q_stat.value +
                                    self.recognizer_in_q.qsize())
        if count_times > 10:
            log.info(
                "{} pre-processing backlog {:0.1f}, detector backlog {:0.1f}, trainer backlog {:0.1f}, recognizer backlog {:0.1f}"
                .format(self.cameraId, pre_queue_stat.value / count_times,
                        human_detector_q_stat.value / count_times,
                        training_dset_q_stat.value / count_times,
                        recognizer_in_q_stat.value / count_times))
            count_times = 0
            pre_queue_stat.update(0)
            human_detector_q_stat.update(0)
            training_dset_q_stat.update(0)
            recognizer_in_q_stat.update(0)
        count_times += 1
        time.sleep(1)
Example #6
class HumanDetector(object):
    num = 1

    def __init__(self, *args, **kw):
        self.hog = cv2.HOGDescriptor()
        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
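        # Stock OpenCV detectors: HOG + linear SVM for pedestrians, plus Haar
        # cascades for faces and upper bodies (note the same upper-body
        # cascade is loaded for both bodyClassifier and upperBodyClassifier).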
        self.bodyClassifier = cv2.CascadeClassifier(
            os.path.join(harrsDir, "haarcascade_upperbody.xml"))
        self.faceClassifier = cv2.CascadeClassifier(
            os.path.join(harrsDir, "haarcascade_frontalface_alt.xml"))
        self.upperBodyClassifier = cv2.CascadeClassifier(
            os.path.join(harrsDir, "haarcascade_upperbody.xml"))
        self.HumanRecognizerProcessors = []
        self.frame_interval = StatValue()
        self.last_frame_time = clock()
        self.latency = StatValue()

    def detect_humans(self, cameraName, image, t0, secs):
        validHuman = []
        #for body in self.fullBodyHaarDetector(image):
        #cv2.imwrite("images/{}.body.jpg".format(self.num), body)
        faces = self.faceDetector(image)
        #if len(faces) > 0:
        #    log.debug("发现{}个人脸".format(len(faces)))
        #if len(faces) > 1:
        #    continue
        for face in faces:
            #cv2.imwrite("images/{}.face.jpg".format(self.num), face)
            validHuman.append((face, face, t0, secs))
        self.num += 1
        t = clock()
        self.latency.update(t - t0)
        self.frame_interval.update(t - self.last_frame_time)
        #if len(validHuman) > 0:
        #log.debug("发现有效人物目标{}个 图像延迟:{:0.1f} 目标检测器用时:{:0.1f} ms".format(
        #len(validHuman),self.latency.value * 1000, self.frame_interval.value * 1000))
        self.last_frame_time = t
        return validHuman

    def fullBodyDetector(self, image):
        found, w = self.hog.detectMultiScale(image,
                                             winStride=(8, 8),
                                             padding=(32, 32),
                                             scale=1.05)
        self.draw_detections(image, found)
        #print "found {} person".format(len(found))
        return self.cropImage(image, found)

    def fullBodyHaarDetector(self, image):
        try:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            #t = clock()
            rects = self.detect(gray, self.bodyClassifier)
            bodys = []
            #if len(rects) > 0:
            #    log.debug("发现{}个人体图像".format(len(rects)))
            for x1, y1, x2, y2 in rects:
                roi = image.copy()[y1:y2, x1:x2]
                bodys.append((roi, max(x1, x2) - abs(x1 - x2) / 2,
                              max(y1, y2) - abs(y1 - y2) / 2))
            #dt = clock() - t
            #draw_str(image, (20, 20), 'time: %.1f ms' % (dt*1000))
            return bodys
        except Exception as e:
            log.error(e)
            return []
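A hypothetical single-frame usage sketch for the detector above (the image path and camera name are made up; it assumes the faceDetector helper referenced by detect_humans is available, and clock() is the same timing helper used throughout):

import time

detector = HumanDetector()
frame = cv2.imread("sample.jpg")  # any BGR image
t0 = clock()
faces = detector.detect_humans("cam0", frame, t0, time.time())
for face_img, _, ft0, secs in faces:
    cv2.imwrite("face_{}.jpg".format(secs), face_img)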