Code example #1
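StreamingTFObjectDetector consumes detections from the base TensorFlow detector, draws the detection overlay (optionally writing the frame and a Pascal VOC annotation to disk), pushes a notification onto the output queue, and exposes the annotated frames as an MJPEG generator.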
import cv2
import pascal_voc_writer

# BaseTFObjectDetector, SingletonBlockingQueue and NotificationTypes are
# project-local imports, omitted in the article.


class StreamingTFObjectDetector(BaseTFObjectDetector):
    def __init__(self, config, output_q):
        super().__init__(config)
        self.output_q = output_q
        self.output_frame = SingletonBlockingQueue()
        self.active_video_feeds = 0

    def process_detection_intermeddiate(self, frame, orig_box, image_path):
        # Hook invoked by the base detector for each detection (the base
        # class is not shown here).
        outputFrame = self.tf_detector.DisplayDetection(frame, orig_box)
        if self.config.tf_od_frame_write:
            cv2.imwrite(image_path, outputFrame)
        if self.config.tf_od_annotation_write:
            imH, imW, _ = frame.shape
            writer = pascal_voc_writer.Writer(image_path, imW, imH)
            minX, minY, maxX, maxY, klass, confidence = orig_box
            writer.addObject(klass, minX, minY, maxX, maxY)
            writer.save(image_path.replace('.jpg', '.xml'))
        if self.config.show_fps:
            cv2.putText(outputFrame, "%.2f fps" % self.fps.fps,
                        (10, outputFrame.shape[0] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 255), 1)
        self.output_frame.enqueue(outputFrame)

    def process_detection_final(self, label, accuracy, image_path):
        self.output_q.enqueue(
            (NotificationTypes.OBJECT_DETECTED, (label, accuracy, image_path)))

    def generate_output_frames(self):
        self.active_video_feeds += 1
        try:
            while True:
                outputFrame = self.output_frame.read(timeout=2)
                if outputFrame is not None:
                    # encode the frame in JPEG format
                    (flag, encodedImage) = cv2.imencode(".jpg", outputFrame)
                    # ensure the frame was successfully encoded
                    if not flag:
                        continue
                    # yield the output frame in the byte format
                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' +
                           bytearray(encodedImage) + b'\r\n')
        finally:
            self.active_video_feeds -= 1
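The article stops at the generator, but the --frame boundary it yields implies the standard MJPEG-over-HTTP pattern. Below is a minimal sketch of how it could be served, assuming Flask; the route name and the module-level detector are hypothetical, not part of the original code.

from flask import Flask, Response

app = Flask(__name__)

# `detector` is assumed to be a constructed-and-started
# StreamingTFObjectDetector; building it needs the project's config and
# output queue, which are not shown here.
detector = ...

@app.route('/video_feed')
def video_feed():
    # The multipart boundary must match the b'--frame' marker the
    # generator yields.
    return Response(detector.generate_output_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

Each connected client gets its own generator, which is why the class counts active_video_feeds up and down around the streaming loop.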
Code example #2
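StreamDetector wires the pipeline together: it starts the configured input stream (RTMP, Pi camera, or video file), runs motion detection and optional door-state detection on every frame, hands motion crops to the object detector, and streams the annotated frames through the same MJPEG pattern as above.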
import threading
import time

import cv2

# SimpleMotionDetector, SingletonBlockingQueue, FPS, InputMode, getch and the
# module-level log are project-local imports, omitted in the article.


class StreamDetector:
    def __init__(self, config, broker_q, door_detector):
        self.outputFrame = SingletonBlockingQueue()
        self.active_video_feeds = 0
        self.config = config
        self.broker_q = broker_q
        self.door_detector = door_detector
        self.motion_detector = SimpleMotionDetector(config)

    def start(self):
        log.info("TFObjectDetector init START")
        self.od = StreamingTFObjectDetector(self.config, self.broker_q).start()

        if self.config.input_mode == InputMode.RTMP_STREAM:
            from input.rtmpstream import RTMPVideoStream
            self.vs = RTMPVideoStream(self.config.rtmp_stream_url).start()
        elif self.config.input_mode == InputMode.PI_CAM:
            from input.picamstream import PiVideoStream
            self.vs = PiVideoStream(resolution=(640, 480), framerate=30).start()
        elif self.config.input_mode == InputMode.VIDEO_FILE:
            from input.videofilestream import VideoFileStream
            self.vs = VideoFileStream(self.config.video_file_path).start()

        self.od.wait_for_ready()
        log.info("TFObjectDetector init END")

        # start a thread that will perform object detection
        log.info("detect_objects init..")
        t = threading.Thread(target=self.detect_objects)
        t.daemon = True
        t.start()
        return t

    def cleanup(self):
        self.vs.stop()

    def draw_masks(self, frame):
        if self.config.md_mask:
            xmin, ymin, xmax, ymax = self.config.md_mask
            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (128, 0, 128), 1)

    def detect_objects(self):
        total = 0

        fps = FPS(50, 100)

        # loop over frames from the video stream
        while True:
            frame = self.vs.read()
            if frame is not None:
                output_frame = frame.copy()
                if self.config.tf_apply_md:
                    output_frame, crop, motion_outside = self.motion_detector.detect(output_frame)
                    if self.config.door_movement_detection:
                        door_state = self.door_detector.detect_door_state(frame, self.config.door_detect_open_door_contour)
                        self.door_detector.add_door_state(door_state)
                        self.door_detector.add_motion_state(motion_outside)
                        if self.config.door_detect_show_detection:
                            minX, minY, maxX, maxY = self.config.door_detect_open_door_contour
                            cv2.rectangle(output_frame, (minX, minY), (maxX, maxY), (0, 255, 0), 1)
                            cv2.putText(output_frame, door_state.name, (minX, minY - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 255, 0), 1)
                    if crop is not None:
                        minX, minY, maxX, maxY = crop
                        cropped_frame = frame[minY:maxY, minX:maxX]
                        self.od.add_task((frame, cropped_frame, (minX, minY)))
                else:
                    # Keep the task tuple shape consistent with the motion
                    # branch above: (full frame, frame to detect on, offset).
                    self.od.add_task((frame, frame, (0, 0)))

                self.draw_masks(output_frame)

                fps.count()

                if total % self.config.fps_print_frames == 0:
                    log.info("od=%.2f/md=%.2f/st=%.2f fps" % (self.od.fps.fps, fps.fps, self.vs.fps.fps))
                log.debug("total: %d" % total)
                total += 1

                if self.config.show_fps:
                    cv2.putText(output_frame,
                                "od=%.2f/md=%.2f/st=%.2f fps" % (self.od.fps.fps, fps.fps, self.vs.fps.fps),
                                (10, output_frame.shape[0] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 255, 255), 1)
                self.outputFrame.enqueue(output_frame)

            else:
                log.info("frame is NONE")

            if self.config.md_frame_rate > 0:
                time.sleep(1 / self.config.md_frame_rate)
            if self.config.debug_mode:
                ch = getch()
                if ch == 'q':
                    break

    def generate(self):
        self.active_video_feeds += 1
        current_feed_num = self.active_video_feeds
        # loop over frames from the output stream
        try:
            while True:
                if self.config.video_feed_fps > 0:
                    time.sleep(1 / self.config.video_feed_fps)
                output_frame = self.outputFrame.read()
                # encode the frame in JPEG format
                (flag, encodedImage) = cv2.imencode(".jpg", output_frame)
                # ensure the frame was successfully encoded
                if not flag:
                    continue
                # yield the output frame in the byte format
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' +
                       bytearray(encodedImage) + b'\r\n')
        finally:
            self.active_video_feeds -= 1
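Both classes lean on SingletonBlockingQueue, which the article doesn't show. From its usage (enqueue() on the processing thread, read() with an optional timeout in each feed generator) it behaves like a single-slot, latest-value queue. A minimal sketch under that assumption, not the project's actual implementation:

import threading

class SingletonBlockingQueue:
    """Single-slot queue sketch: enqueue() overwrites any unread item so
    readers always see the freshest frame; read() blocks until an item
    arrives and returns None on timeout."""

    def __init__(self):
        self._item = None
        self._available = threading.Event()

    def enqueue(self, item):
        self._item = item  # intentionally drops any stale, unread frame
        self._available.set()

    def read(self, timeout=None):
        if not self._available.wait(timeout):
            return None  # timed out with nothing enqueued
        self._available.clear()
        return self._item

Keeping only the latest frame means a slow HTTP client can never back up the detection pipeline; it simply skips frames instead.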