Example #1
    def object_detection(self, detector_queue):
        while True:
            frame = detector_queue.get()
            frame["frame"].objects = self.ObjectDetection.return_objects(frame)
            pop_if_full(
                frame["object_return_queue"], frame,
            )
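Every example in this listing hands its result to pop_if_full, but the helper itself is never shown. Judging from the call sites (a bounded queue, an item, and in Example #8 optional logger, name and warn keyword arguments), it most likely drops the oldest queued entry when the queue is full so the producer never blocks. The sketch below is an approximation inferred from those call sites, not the project's actual code:

import queue


def pop_if_full(target_queue, item, logger=None, name="queue", warn=False):
    """Put item on target_queue, discarding the oldest entry if it is full.

    Approximation inferred from the call sites in this listing; the real
    viseron helper may differ in details.
    """
    try:
        target_queue.put_nowait(item)
    except queue.Full:
        if warn and logger:
            logger.warning(f"{name} is full, discarding the oldest entry")
        target_queue.get_nowait()  # drop the oldest item (single producer assumed)
        target_queue.put_nowait(item)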
Example #2
    def object_detection(self, detector_queue):
        while True:
            self.filtered_objects = []

            frame = detector_queue.get()
            object_event = frame["object_event"]

            objects = self.ObjectDetection.return_objects(frame["frame"])

            if objects:
                LOGGER.debug(objects)

            self.filtered_objects = list(filter(self.filter_objects, objects))

            if self.filtered_objects:
                pop_if_full(
                    frame["object_return_queue"],
                    {
                        "frame": frame["frame"],
                        "full_frame": frame["full_frame"],
                        "objects": self.filtered_objects,
                    },
                )

                if not object_event.is_set():
                    object_event.set()
                continue

            if object_event.is_set():
                object_event.clear()
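Example #2 also raises object_event while detections are present and clears it when they stop, so downstream code can sleep until there is something to look at. A hypothetical consumer of that pattern (the handler is illustrative, not taken from the project) could look like this:

def object_consumer(object_return_queue, object_event, handle_detection=print):
    """Illustrative consumer: wait until the detector reports objects, then drain them."""
    while True:
        object_event.wait()  # blocks while no objects are being detected
        result = object_return_queue.get()
        for obj in result["objects"]:
            handle_detection(result["frame"], obj)  # handler is caller-supplied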
Example #3
File: motion.py  Project: TimDowker/viseron
    def motion_detection(self, motion_queue):
        while True:
            frame = motion_queue.get()
            frame["frame"].motion_contours = self.detect(frame)
            pop_if_full(
                frame["motion_return_queue"],
                frame,
            )
Example #4
    def object_detection(self, detector_queue):
        while True:
            frame = detector_queue.get()
            self.detection_lock.acquire()
            frame["frame"].objects = self.object_detector.return_objects(frame)
            self.detection_lock.release()
            pop_if_full(
                frame["object_return_queue"],
                frame,
            )
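Example #4 is Example #1 with access to the detector serialized through detection_lock. The explicit acquire/release pair works, but a with block is equivalent and releases the lock even if return_objects raises; the same method rewritten that way would read:

    def object_detection(self, detector_queue):
        while True:
            frame = detector_queue.get()
            with self.detection_lock:  # released automatically, even on exceptions
                frame["frame"].objects = self.object_detector.return_objects(frame)
            pop_if_full(
                frame["object_return_queue"],
                frame,
            )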
Example #5
File: camera.py  Project: TimDowker/viseron
    def decoder(self, input_queue, output_queue, width, height):
        """Decodes the frame, leaves any other potential keys in the dict untouched"""
        self._logger.debug("Starting decoder thread")
        while True:
            input_item = input_queue.get()
            if input_item["frame"].decode_frame():
                input_item["frame"].resize(input_item["decoder_name"], width, height)
                pop_if_full(output_queue, input_item)
                continue

            self._logger.error("Unable to decode frame. FFMPEG pipe seems broken")
            self._connection_error = True

        self._logger.debug("Exiting decoder thread")
Example #6
File: camera.py  Project: vageesh79/viseron
    def decoder(self, input_queue, output_queue, width, height):
        """Decodes the frame, leaves any other potential keys in the dict untouched"""
        LOGGER.info("Starting decoder thread")
        while True:
            input_item = input_queue.get()
            ret, frame = self.decode_frame(input_item["raw_frame"])
            if ret:
                self.current_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_NV21)
                input_item["full_frame"] = self.current_frame
                input_item["frame"] = cv2.resize(
                    self.current_frame, (width, height), interpolation=cv2.INTER_LINEAR,
                )
                pop_if_full(output_queue, input_item)

        LOGGER.info("Exiting decoder thread")
Example #7
File: camera.py  Project: vageesh79/viseron
    def capture_pipe(
        self,
        frame_buffer,
        frame_ready,
        object_decoder_interval,
        object_decoder_queue,
        scan_for_objects,
        object_event,
        object_return_queue,
        motion_decoder_interval,
        motion_decoder_queue,
        scan_for_motion,
    ):
        LOGGER.info("Starting capture process")

        pipe = self.rtsp_pipe()

        self.connected = True
        object_frame_number = 0
        object_decoder_interval_calculated = int(
            object_decoder_interval * self.stream_fps
        )
        motion_frame_number = 0
        motion_decoder_interval_calculated = int(
            motion_decoder_interval * self.stream_fps
        )

        bytes_to_read = int(self.stream_width * self.stream_height * 1.5)

        while self.connected:
            if self.connection_error:
                sleep(5)
                LOGGER.error("Restarting frame pipe")
                pipe.terminate()
                pipe.communicate()
                pipe = self.rtsp_pipe()
                self.connection_error = False

            self.raw_image = pipe.stdout.read(bytes_to_read)
            pop_if_full(frame_buffer, {"frame": self.raw_image})

            if scan_for_objects.is_set():
                if object_frame_number % object_decoder_interval_calculated == 0:
                    object_frame_number = 0
                    pop_if_full(
                        object_decoder_queue,
                        {
                            "raw_frame": self.raw_image,
                            "object_event": object_event,
                            "object_return_queue": object_return_queue,
                        },
                    )

                object_frame_number += 1
            else:
                object_frame_number = 0

            if scan_for_motion.is_set():
                if motion_frame_number % motion_decoder_interval_calculated == 0:
                    motion_frame_number = 0
                    pop_if_full(motion_decoder_queue, {"raw_frame": self.raw_image})

                motion_frame_number += 1
            else:
                motion_frame_number = 0

            frame_ready.set()
            frame_ready.clear()

        frame_ready.set()
        pipe.terminate()
        pipe.communicate()
        LOGGER.info("FFMPEG frame grabber stopped")
Example #8
    def capture_pipe(
        self,
        object_decoder_interval,
        object_decoder_queue,
        object_return_queue,
        motion_decoder_interval,
        motion_decoder_queue,
        motion_return_queue,
    ):
        self._logger.debug("Starting capture thread")
        self._connected = True

        self.stream.start_pipe()
        if self._segments:
            self._segments.start_pipe()

        object_frame_number = 0
        object_first_scan = False
        object_decoder_interval_calculated = round(object_decoder_interval *
                                                   self.stream.fps)
        self._logger.debug(
            f"Running object detection at {object_decoder_interval}s interval, "
            f"every {object_decoder_interval_calculated} frame(s)")

        motion_frame_number = 0
        motion_decoder_interval_calculated = round(motion_decoder_interval *
                                                   self.stream.fps)
        self._logger.debug(
            f"Running motion detection at {motion_decoder_interval}s interval, "
            f"every {motion_decoder_interval_calculated} frame(s)")

        while self._connected:
            if self._connection_error:
                sleep(5)
                self._logger.error("Restarting frame pipe")
                self.stream.close_pipe()
                self.stream.check_command()
                self.stream.start_pipe()
                self._connection_error = False

            current_frame = self.stream.read()
            if self.scan_for_objects.is_set():
                if object_frame_number % object_decoder_interval_calculated == 0:
                    if object_first_scan:
                        # force motion detection on same frame to save computing power
                        motion_frame_number = 0
                        object_first_scan = False
                    object_frame_number = 0
                    pop_if_full(
                        object_decoder_queue,
                        {
                            "decoder_name": "object_detection",
                            "frame": current_frame,
                            "object_return_queue": object_return_queue,
                            "camera_config": self._config,
                        },
                        logger=self._logger,
                        name="object_decoder_queue",
                        warn=True,
                    )

                object_frame_number += 1
            else:
                object_frame_number = 0
                object_first_scan = True

            if self.scan_for_motion.is_set():
                if motion_frame_number % motion_decoder_interval_calculated == 0:
                    motion_frame_number = 0
                    pop_if_full(
                        motion_decoder_queue,
                        {
                            "decoder_name": "motion_detection",
                            "frame": current_frame,
                            "motion_return_queue": motion_return_queue,
                        },
                        logger=self._logger,
                        name="motion_decoder_queue",
                        warn=True,
                    )

                motion_frame_number += 1
            else:
                motion_frame_number = 0

            self.frame_ready.set()
            self.frame_ready.clear()

        self.stream.close_pipe()
        if self._segments:
            self._segments.close_pipe()
        self._logger.info("FFMPEG frame grabber stopped")
Example #9
File: camera.py  Project: TimDowker/viseron
    def capture_pipe(
        self,
        object_decoder_interval,
        object_decoder_queue,
        object_return_queue,
        motion_decoder_interval,
        motion_decoder_queue,
        motion_return_queue,
    ):
        self._logger.debug("Starting capture thread")
        # First read a single frame to make sure the ffmpeg command is correct
        bytes_to_read = int(self.stream_width * self.stream_height * 1.5)
        retry = False
        while True:
            pipe = self.pipe(stderr=True, single_frame=True)
            _, stderr = pipe.communicate()
            if stderr and FFMPEG_ERROR_WHILE_DECODING not in stderr.decode():
                self._logger.error(
                    f"Error starting decoder pipe! {stderr.decode()} "
                    f"Retrying in 5 seconds"
                )
                sleep(5)
                retry = True
                continue
            if retry:
                self._logger.info("Succesful reconnection!")
            break

        pipe = self.pipe()
        self._connected = True

        object_frame_number = 0
        object_first_scan = False
        object_decoder_interval_calculated = round(
            object_decoder_interval * self.stream_fps
        )
        self._logger.debug(
            f"Running object detection at {object_decoder_interval}s interval, "
            f"every {object_decoder_interval_calculated} frame(s)"
        )

        motion_frame_number = 0
        motion_decoder_interval_calculated = round(
            motion_decoder_interval * self.stream_fps
        )
        self._logger.debug(
            f"Running motion detection at {motion_decoder_interval}s interval, "
            f"every {motion_decoder_interval_calculated} frame(s)"
        )

        while self._connected:
            if self._connection_error:
                sleep(5)
                self._logger.error("Restarting frame pipe")
                pipe.terminate()
                pipe.communicate()
                pipe = self.pipe()
                self._connection_error = False

            current_frame = Frame(
                pipe.stdout.read(bytes_to_read), self.stream_width, self.stream_height
            )
            pop_if_full(self._frame_buffer, current_frame)

            if self.scan_for_objects.is_set():
                if object_frame_number % object_decoder_interval_calculated == 0:
                    if object_first_scan:
                        # force motion detection on same frame to save computing power
                        motion_frame_number = 0
                        object_first_scan = False
                    object_frame_number = 0
                    pop_if_full(
                        object_decoder_queue,
                        {
                            "decoder_name": "object_detection",
                            "frame": current_frame,
                            "object_return_queue": object_return_queue,
                            "camera_config": self._config,
                        },
                    )

                object_frame_number += 1
            else:
                object_frame_number = 0
                object_first_scan = True

            if self.scan_for_motion.is_set():
                if motion_frame_number % motion_decoder_interval_calculated == 0:
                    motion_frame_number = 0
                    pop_if_full(
                        motion_decoder_queue,
                        {
                            "decoder_name": "motion_detection",
                            "frame": current_frame,
                            "motion_return_queue": motion_return_queue,
                        },
                    )

                motion_frame_number += 1
            else:
                motion_frame_number = 0

            self.frame_ready.set()
            self.frame_ready.clear()

        pipe.terminate()
        pipe.communicate()
        self._logger.info("FFMPEG frame grabber stopped")