Example #1
    def __init__(self, tf_device=None, labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != 'cpu':
            try:
                print(f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate('libedgetpu.so.1.0',
                                                  device_config)
                print("TPU found")
            except ValueError:
                print("No EdgeTPU detected. Falling back to CPU.")

        if edge_tpu_delegate is None:
            self.interpreter = tflite.Interpreter(
                model_path='/cpu_model.tflite')
        else:
            self.interpreter = tflite.Interpreter(
                model_path='/edgetpu_model.tflite',
                experimental_delegates=[edge_tpu_delegate])

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
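EventsPerSecond is used in every example on this page but its definition is not included. Below is a minimal stand-in sketch supporting the start()/update()/eps() calls seen here; it is an assumption for illustration, not the project's actual implementation.

import datetime

class EventsPerSecond:
    # stand-in sketch; the project's real EventsPerSecond may differ
    def __init__(self, max_events=1000):
        self._start = None
        self._timestamps = []
        self._max_events = max_events

    def start(self):
        self._start = datetime.datetime.now().timestamp()

    def update(self):
        if self._start is None:
            self.start()
        self._timestamps.append(datetime.datetime.now().timestamp())
        # keep the buffer bounded
        if len(self._timestamps) > self._max_events:
            self._timestamps = self._timestamps[-self._max_events:]

    def eps(self, last_n_seconds=10):
        if self._start is None:
            self.start()
        now = datetime.datetime.now().timestamp()
        # count events in the recent window and normalize to a rate
        seconds = min(now - self._start, last_n_seconds)
        if seconds == 0:
            return 0.0
        recent = [t for t in self._timestamps if t > now - last_n_seconds]
        return len(recent) / seconds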
Example #2
File: edgetpu.py Project: gpete/frigate
    def __init__(self, tf_device=None, num_threads=3, labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != "cpu":
            try:
                logger.info(
                    f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0",
                                                  device_config)
                logger.info("TPU found")
                self.interpreter = tflite.Interpreter(
                    model_path="/edgetpu_model.tflite",
                    experimental_delegates=[edge_tpu_delegate],
                )
            except ValueError:
                logger.info("No EdgeTPU detected.")
                raise
        else:
            self.interpreter = tflite.Interpreter(
                model_path="/cpu_model.tflite", num_threads=num_threads)

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
Example #3
class CameraCapture(threading.Thread):
    def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
        threading.Thread.__init__(self)
        self.name = f"capture:{camera_name}"
        self.camera_name = camera_name
        self.frame_shape = frame_shape
        self.frame_queue = frame_queue
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value("d", 0.0)
        self.last_frame = 0

    def run(self):
        self.skipped_fps.start()
        capture_frames(
            self.ffmpeg_process,
            self.camera_name,
            self.frame_shape,
            self.frame_manager,
            self.frame_queue,
            self.fps,
            self.skipped_fps,
            self.current_frame,
        )
Example #4
class RemoteObjectDetector():
    def __init__(self, name, labels, detection_queue):
        self.labels = load_labels(labels)
        self.name = name
        self.fps = EventsPerSecond()
        self.plasma_client = plasma.connect("/tmp/plasma")
        self.detection_queue = detection_queue
    
    def detect(self, tensor_input, threshold=.4):
        detections = []

        now = f"{self.name}-{str(datetime.datetime.now().timestamp())}"
        object_id_frame = plasma.ObjectID(hashlib.sha1(str.encode(now)).digest())
        object_id_detections = plasma.ObjectID(hashlib.sha1(str.encode(f"out-{now}")).digest())
        self.plasma_client.put(tensor_input, object_id_frame)
        self.detection_queue.put(now)
        raw_detections = self.plasma_client.get(object_id_detections, timeout_ms=10000)

        if raw_detections is plasma.ObjectNotAvailable:
            self.plasma_client.delete([object_id_frame])
            return detections

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append((
                self.labels[int(d[0])],
                float(d[1]),
                (d[2], d[3], d[4], d[5])
            ))
        self.plasma_client.delete([object_id_frame, object_id_detections])
        self.fps.update()
        return detections
Example #5
 def load_frames(self):
     fps = EventsPerSecond()
     skipped_fps = EventsPerSecond()
     current_frame = mp.Value('d', 0.0)
     frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
     ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
     capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager, 
         self.frame_queue, fps, skipped_fps, current_frame)
     ffmpeg_process.wait()
     ffmpeg_process.communicate()
Example #6
 def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
     threading.Thread.__init__(self)
     self.name = f"capture:{camera_name}"
     self.camera_name = camera_name
     self.frame_shape = frame_shape
     self.frame_queue = frame_queue
     self.fps = fps
     self.skipped_fps = EventsPerSecond()
     self.frame_manager = SharedMemoryFrameManager()
     self.ffmpeg_process = ffmpeg_process
     self.current_frame = mp.Value('d', 0.0)
     self.last_frame = 0
Example #7
 def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame):
     threading.Thread.__init__(self)
     self.name = name
     self.frame_shape = frame_shape
     self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
     self.frame_queue = frame_queue
     self.take_frame = take_frame
     self.fps = fps
     self.skipped_fps = EventsPerSecond()
     self.plasma_client = PlasmaManager()
     self.ffmpeg_process = ffmpeg_process
     self.current_frame = 0
     self.last_frame = 0
     self.detection_frame = detection_frame
Example #8
 def __init__(self, name, labels, detection_queue, event, model_shape):
     self.labels = labels
     self.name = name
     self.fps = EventsPerSecond()
     self.detection_queue = detection_queue
     self.event = event
     self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
     self.np_shm = np.ndarray(
         (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
     )
     self.out_shm = mp.shared_memory.SharedMemory(
         name=f"out-{self.name}", create=False
     )
     self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
Example #9
File: video.py Project: yllar/frigate
 def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, stop_event):
     threading.Thread.__init__(self)
     self.name = name
     self.frame_shape = frame_shape
     self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
     self.frame_queue = frame_queue
     self.take_frame = take_frame
     self.fps = fps
     self.skipped_fps = EventsPerSecond()
     self.frame_manager = SharedMemoryFrameManager()
     self.ffmpeg_process = ffmpeg_process
     self.current_frame = mp.Value('d', 0.0)
     self.last_frame = 0
     self.stop_event = stop_event
Example #10
    def __init__(self,
                 tf_device=None,
                 model_path=None,
                 num_threads=3,
                 labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != "cpu":
            try:
                logger.info(
                    f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0",
                                                  device_config)
                logger.info("TPU found")
                self.interpreter = tflite.Interpreter(
                    model_path=model_path or "/edgetpu_model.tflite",
                    experimental_delegates=[edge_tpu_delegate],
                )
            except ValueError:
                logger.error(
                    "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
                )
                raise
        else:
            logger.warning(
                "CPU detectors are not recommended and should only be used for testing or for trial purposes."
            )
            self.interpreter = tflite.Interpreter(model_path=model_path
                                                  or "/cpu_model.tflite",
                                                  num_threads=num_threads)

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
Example #11
 def load_frames(self):
     fps = EventsPerSecond()
     skipped_fps = EventsPerSecond()
     stop_event = mp.Event()
     detection_frame = mp.Value(
         'd',
         datetime.datetime.now().timestamp() + 100000)
     current_frame = mp.Value('d', 0.0)
     ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(
         " ")
     ffmpeg_process = start_or_restart_ffmpeg(
         ffmpeg_cmd,
         self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2])
     capture_frames(ffmpeg_process, self.camera_name, self.frame_shape,
                    self.frame_manager, self.frame_queue, 1, fps,
                    skipped_fps, stop_event, detection_frame, current_frame)
     ffmpeg_process.wait()
     ffmpeg_process.communicate()
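start_or_restart_ffmpeg is referenced in several of these examples (with slightly different signatures across versions) but is not shown on this page. A minimal stand-in sketch matching the two-argument call above follows; it is an assumption for illustration, not the project's implementation.

import subprocess as sp

def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
    # stand-in sketch; signatures vary across the versions on this page
    # terminate a previous process if one was handed in
    if ffmpeg_process is not None:
        ffmpeg_process.terminate()
        try:
            ffmpeg_process.communicate(timeout=30)
        except sp.TimeoutExpired:
            ffmpeg_process.kill()
            ffmpeg_process.communicate()
    # start ffmpeg with a pipe large enough to buffer whole raw frames
    return sp.Popen(
        ffmpeg_cmd,
        stdout=sp.PIPE,
        stdin=sp.DEVNULL,
        bufsize=frame_size * 10,
        start_new_session=True,
    )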
Example #12
class RemoteObjectDetector:
    def __init__(self, name, labels, detection_queue, event, model_shape):
        self.labels = labels
        self.name = name
        self.fps = EventsPerSecond()
        self.detection_queue = detection_queue
        self.event = event
        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
        self.np_shm = np.ndarray(
            (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
        )
        self.out_shm = mp.shared_memory.SharedMemory(
            name=f"out-{self.name}", create=False
        )
        self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)

    def detect(self, tensor_input, threshold=0.4):
        detections = []

        # copy input to shared memory
        self.np_shm[:] = tensor_input[:]
        self.event.clear()
        self.detection_queue.put(self.name)
        result = self.event.wait(timeout=10.0)

        # event.wait() returns False if it timed out
        if not result:
            return detections

        for d in self.out_np_shm:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections

    def cleanup(self):
        self.shm.unlink()
        self.out_shm.unlink()
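Because the constructor above attaches to existing segments with create=False, matching shared memory must be created beforehand, normally by the detection process. A minimal sketch of that setup, assuming a camera named "front" and a 300x300 model:

from multiprocessing import shared_memory

# illustrative setup, not project code
name = "front"
model_shape = (300, 300)
# input segment: one (1, height, width, 3) uint8 frame
in_shm = shared_memory.SharedMemory(
    name=name, create=True, size=model_shape[0] * model_shape[1] * 3)
# output segment: 20 detections of 6 float32 values each
out_shm = shared_memory.SharedMemory(
    name=f"out-{name}", create=True, size=20 * 6 * 4)
# RemoteObjectDetector(name, labels, queue, event, model_shape) can now
# attach to both segments; its cleanup() call unlinks them when done.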
Example #13
def capture_frames(ffmpeg_process, camera_name, frame_shape,
                   frame_manager: FrameManager, frame_queue, take_frame: int,
                   fps: EventsPerSecond, skipped_fps: EventsPerSecond,
                   stop_event: mp.Event, current_frame: mp.Value):

    frame_num = 0
    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
    skipped_fps.start()
    while True:
        if stop_event.is_set():
            print(f"{camera_name}: stop event set. exiting capture thread...")
            break

        frame_bytes = ffmpeg_process.stdout.read(frame_size)
        current_frame.value = datetime.datetime.now().timestamp()

        if len(frame_bytes) < frame_size:
            print(
                f"{camera_name}: ffmpeg sent a broken frame. something is wrong."
            )

            if ffmpeg_process.poll() is not None:
                print(
                    f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
                )
                break
            else:
                continue

        fps.update()

        frame_num += 1
        if (frame_num % take_frame) != 0:
            skipped_fps.update()
            continue

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_fps.update()
            continue

        # put the frame in the frame manager
        frame_buffer = frame_manager.create(
            f"{camera_name}{current_frame.value}", frame_size)
        frame_buffer[:] = frame_bytes[:]
        frame_manager.close(f"{camera_name}{current_frame.value}")

        # add to the queue
        frame_queue.put(current_frame.value)
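A hedged consumer-side sketch: frames queued by capture_frames above are keyed by "{camera_name}{frame_time}" and stored as YUV planes shaped (height * 3 // 2, width). The FrameManager get()/close() calls mirror the process_frames examples later on this page; the 10-second timeout is an assumption.

import queue

def read_next_frame(frame_queue, frame_manager, camera_name, frame_shape):
    # illustrative helper; timeout and naming follow the examples above
    try:
        frame_time = frame_queue.get(True, 10)
    except queue.Empty:
        return None
    # look the frame up in shared memory by name
    frame = frame_manager.get(f"{camera_name}{frame_time}",
                              (frame_shape[0] * 3 // 2, frame_shape[1]))
    if frame is not None:
        frame_manager.close(f"{camera_name}{frame_time}")
    return frame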
Example #14
class CameraCapture(threading.Thread):
    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue,
                 take_frame, fps, detection_frame):
        threading.Thread.__init__(self)
        self.name = name
        self.frame_shape = frame_shape
        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        self.frame_queue = frame_queue
        self.take_frame = take_frame
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.plasma_client = PlasmaManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = 0
        self.last_frame = 0
        self.detection_frame = detection_frame

    def run(self):
        frame_num = 0
        self.skipped_fps.start()
        while True:
            if self.ffmpeg_process.poll() is not None:
                print(
                    f"{self.name}: ffmpeg process is not running. exiting capture thread..."
                )
                break

            frame_bytes = self.ffmpeg_process.stdout.read(self.frame_size)
            self.current_frame = datetime.datetime.now().timestamp()

            if len(frame_bytes) == 0:
                print(
                    f"{self.name}: ffmpeg didnt return a frame. something is wrong."
                )
                continue

            self.fps.update()

            frame_num += 1
            if (frame_num % self.take_frame) != 0:
                self.skipped_fps.update()
                continue

            # if the detection process is more than 1 second behind, skip this frame
            if self.detection_frame.value > 0.0 and (
                    self.last_frame - self.detection_frame.value) > 1:
                self.skipped_fps.update()
                continue

            # put the frame in the plasma store
            self.plasma_client.put(
                f"{self.name}{self.current_frame}",
                np.frombuffer(frame_bytes, np.uint8).reshape(self.frame_shape))
            # add to the queue
            self.frame_queue.put(self.current_frame)
            self.last_frame = self.current_frame
Example #15
def process_frames(camera_name: str,
                   frame_queue: mp.Queue,
                   frame_shape,
                   frame_manager: FrameManager,
                   motion_detector: MotionDetector,
                   object_detector: RemoteObjectDetector,
                   object_tracker: ObjectTracker,
                   detected_objects_queue: mp.Queue,
                   fps: mp.Value,
                   detection_fps: mp.Value,
                   current_frame_time: mp.Value,
                   objects_to_track: List[str],
                   object_filters: Dict,
                   mask,
                   stop_event: mp.Event,
                   exit_on_empty: bool = False):

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    while True:
        if stop_event.is_set() or (exit_on_empty and frame_queue.empty()):
            print(f"Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(f"{camera_name}{frame_time}",
                                  (frame_shape[0] * 3 // 2, frame_shape[1]))

        if frame is None:
            print(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_object_boxes = [
            obj['box'] for obj in object_tracker.tracked_objects.values()
        ]

        # combine motion boxes with known locations of existing objects
        combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

        # compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in combined_boxes
        ]

        # combine overlapping regions
        combined_regions = reduce_boxes(regions)

        # re-compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
            for a in combined_regions
        ]

        # resize regions and detect
        detections = []
        for region in regions:
            detections.extend(
                detect(object_detector, frame, region, objects_to_track,
                       object_filters, mask))

        #########
        # merge objects, check for clipped objects and look again up to 4 times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        selected_objects.extend(
                            detect(object_detector, frame, region,
                                   objects_to_track, object_filters, mask))

                        refining = True
                    else:
                        selected_objects.append(obj)
            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue
        detected_objects_queue.put(
            (camera_name, frame_time, object_tracker.tracked_objects))

        detection_fps.value = object_detector.fps.eps()

        frame_manager.close(f"{camera_name}{frame_time}")
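calculate_region is central to the region logic above but is not included on this page. A rough stand-in sketch follows: it expands a box to a square region, applies the multiplier, and clamps the result to the frame. This is an approximation for illustration, not the project's exact implementation (later versions on this page also take a region_min_size argument).

def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2.0):
    # approximate stand-in; frame_shape is (height, width, ...)
    size = int(max(xmax - xmin, ymax - ymin) * multiplier)
    size = min(size, frame_shape[0], frame_shape[1])
    # center the square on the box, then clamp to the frame
    x_offset = int(max(0, min(frame_shape[1] - size, (xmax + xmin) // 2 - size // 2)))
    y_offset = int(max(0, min(frame_shape[0] - size, (ymax + ymin) // 2 - size // 2)))
    return (x_offset, y_offset, x_offset + size, y_offset + size)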
Example #16
class LocalObjectDetector(ObjectDetector):
    def __init__(self, tf_device=None, model_path=None, num_threads=3, labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != "cpu":
            try:
                logger.info(f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
                logger.info("TPU found")
                self.interpreter = tflite.Interpreter(
                    model_path=model_path or "/edgetpu_model.tflite",
                    experimental_delegates=[edge_tpu_delegate],
                )
            except ValueError:
                logger.error(
                    "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
                )
                raise
        else:
            logger.warning(
                "CPU detectors are not recommended and should only be used for testing or for trial purposes."
            )
            self.interpreter = tflite.Interpreter(
                model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
            )

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect(self, tensor_input, threshold=0.4):
        detections = []

        raw_detections = self.detect_raw(tensor_input)

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )

        detections = np.zeros((20, 6), np.float32)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
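A hedged usage sketch for the class above: construct a CPU detector and run a single detection on a blank frame. The model and label paths are the defaults from the example; reading the input size from tensor_input_details is standard TensorFlow Lite usage, but the rest is illustrative only.

import numpy as np

# illustrative usage only; requires the model and label files to exist
detector = LocalObjectDetector(tf_device="cpu", labels="/labelmap.txt")
# input shape is (1, height, width, 3) for the SSD-style models used here
_, height, width, _ = detector.tensor_input_details[0]["shape"]
tensor_input = np.zeros((1, height, width, 3), dtype=np.uint8)
for label, score, box in detector.detect(tensor_input, threshold=0.5):
    print(label, score, box)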
Example #17
def process_frames(
    camera_name: str,
    frame_queue: mp.Queue,
    frame_shape,
    model_shape,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue,
    process_info: Dict,
    objects_to_track: List[str],
    object_filters,
    detection_enabled: mp.Value,
    stop_event,
    exit_on_empty: bool = False,
):

    fps = process_info["process_fps"]
    detection_fps = process_info["detection_fps"]
    current_frame_time = process_info["detection_frame"]

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan_counter = 0

    while not stop_event.is_set():
        if exit_on_empty and frame_queue.empty():
            logger.info(f"Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(
            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
        )

        if frame is None:
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        regions = []

        # if detection is disabled
        if not detection_enabled.value:
            object_tracker.match_and_update(frame_time, [])
        else:
            # get stationary object ids
            # check every Nth frame for stationary objects
            # disappeared objects are not stationary
            # also check for overlapping motion boxes
            stationary_object_ids = [
                obj["id"]
                for obj in object_tracker.tracked_objects.values()
                # if there hasn't been motion for 10 frames
                if obj["motionless_count"] >= 10
                # and it isn't due for a periodic check
                and (
                    detect_config.stationary.interval == 0
                    or obj["motionless_count"] % detect_config.stationary.interval != 0
                )
                # and it hasn't disappeared
                and object_tracker.disappeared[obj["id"]] == 0
                # and it doesn't overlap with any current motion boxes
                and not intersects_any(obj["box"], motion_boxes)
            ]

            # get tracked object boxes that aren't stationary
            tracked_object_boxes = [
                obj["box"]
                for obj in object_tracker.tracked_objects.values()
                if not obj["id"] in stationary_object_ids
            ]

            # combine motion boxes with known locations of existing objects
            combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

            region_min_size = max(model_shape[0], model_shape[1])
            # compute regions
            regions = [
                calculate_region(
                    frame_shape,
                    a[0],
                    a[1],
                    a[2],
                    a[3],
                    region_min_size,
                    multiplier=random.uniform(1.2, 1.5),
                )
                for a in combined_boxes
            ]

            # consolidate regions with heavy overlap
            regions = [
                calculate_region(
                    frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
                )
                for a in reduce_boxes(regions, 0.4)
            ]

            # if starting up, get the next startup scan region
            if startup_scan_counter < 9:
                ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
                ymax = int(frame_shape[0] / 3 + ymin)
                xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
                xmax = int(frame_shape[1] / 3 + xmin)
                regions.append(
                    calculate_region(
                        frame_shape,
                        xmin,
                        ymin,
                        xmax,
                        ymax,
                        region_min_size,
                        multiplier=1.2,
                    )
                )
                startup_scan_counter += 1

            # resize regions and detect
            # seed with stationary objects
            detections = [
                (
                    obj["label"],
                    obj["score"],
                    obj["box"],
                    obj["area"],
                    obj["region"],
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] in stationary_object_ids
            ]

            for region in regions:
                detections.extend(
                    detect(
                        object_detector,
                        frame,
                        model_shape,
                        region,
                        objects_to_track,
                        object_filters,
                    )
                )

            #########
            # merge objects, check for clipped objects and look again up to 4 times
            #########
            refining = len(regions) > 0
            refine_count = 0
            while refining and refine_count < 4:
                refining = False

                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                selected_objects = []
                for group in detected_object_groups.values():

                    # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                    boxes = [
                        (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                        for o in group
                    ]
                    confidences = [o[1] for o in group]
                    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                    for index in idxs:
                        obj = group[index[0]]
                        if clipped(obj, frame_shape):
                            box = obj[2]
                            # calculate a new region that will hopefully get the entire object
                            region = calculate_region(
                                frame_shape,
                                box[0],
                                box[1],
                                box[2],
                                box[3],
                                region_min_size,
                            )

                            regions.append(region)

                            selected_objects.extend(
                                detect(
                                    object_detector,
                                    frame,
                                    model_shape,
                                    region,
                                    objects_to_track,
                                    object_filters,
                                )
                            )

                            refining = True
                        else:
                            selected_objects.append(obj)
                # set the detections list to only include top, complete objects
                # and new detections
                detections = selected_objects

                if refining:
                    refine_count += 1

            ## drop detections that overlap too much
            consolidated_detections = []

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                # loop over detections grouped by label
                for group in detected_object_groups.values():
                    # if the group only has 1 item, skip
                    if len(group) == 1:
                        consolidated_detections.append(group[0])
                        continue

                    # sort smallest to largest by area
                    sorted_by_area = sorted(group, key=lambda g: g[3])

                    for current_detection_idx in range(0, len(sorted_by_area)):
                        current_detection = sorted_by_area[current_detection_idx][2]
                        overlap = 0
                        for to_check_idx in range(
                            min(current_detection_idx + 1, len(sorted_by_area)),
                            len(sorted_by_area),
                        ):
                            to_check = sorted_by_area[to_check_idx][2]
                            # if 90% of smaller detection is inside of another detection, consolidate
                            if (
                                area(intersection(current_detection, to_check))
                                / area(current_detection)
                                > 0.9
                            ):
                                overlap = 1
                                break
                        if overlap == 0:
                            consolidated_detections.append(
                                sorted_by_area[current_detection_idx]
                            )
                # now that we have refined our detections, we need to track objects
                object_tracker.match_and_update(frame_time, consolidated_detections)
            # else, just update the frame times for the stationary objects
            else:
                object_tracker.update_frame_times(frame_time)

        # add to the queue if not full
        if detected_objects_queue.full():
            frame_manager.delete(f"{camera_name}{frame_time}")
            continue
        else:
            fps_tracker.update()
            fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_name,
                    frame_time,
                    object_tracker.tracked_objects,
                    motion_boxes,
                    regions,
                )
            )
            detection_fps.value = object_detector.fps.eps()
            frame_manager.close(f"{camera_name}{frame_time}")
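The consolidation logic above relies on area(), intersection(), and intersects_any(), which are not shown on this page. Minimal stand-in sketches over (xmin, ymin, xmax, ymax) boxes, given as assumptions:

def area(box):
    # stand-in assumption: boxes are (xmin, ymin, xmax, ymax)
    return max(0, box[2] - box[0]) * max(0, box[3] - box[1])

def intersection(box_a, box_b):
    # overlapping rectangle of the two boxes (may be degenerate)
    return (
        max(box_a[0], box_b[0]),
        max(box_a[1], box_b[1]),
        min(box_a[2], box_b[2]),
        min(box_a[3], box_b[3]),
    )

def intersects_any(box, boxes):
    return any(area(intersection(box, other)) > 0 for other in boxes)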
Example #18
 def __init__(self, name, labels, detection_queue):
     self.labels = load_labels(labels)
     self.name = name
     self.fps = EventsPerSecond()
     self.plasma_client = plasma.connect("/tmp/plasma")
     self.detection_queue = detection_queue
Example #19
File: edgetpu.py Project: gpete/frigate
class LocalObjectDetector(ObjectDetector):
    def __init__(self, tf_device=None, num_threads=3, labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None

        if tf_device != "cpu":
            try:
                logger.info(
                    f"Attempting to load TPU as {device_config['device']}")
                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0",
                                                  device_config)
                logger.info("TPU found")
                self.interpreter = tflite.Interpreter(
                    model_path="/edgetpu_model.tflite",
                    experimental_delegates=[edge_tpu_delegate],
                )
            except ValueError:
                logger.info("No EdgeTPU detected.")
                raise
        else:
            self.interpreter = tflite.Interpreter(
                model_path="/cpu_model.tflite", num_threads=num_threads)

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect(self, tensor_input, threshold=0.4):
        detections = []

        raw_detections = self.detect_raw(tensor_input)

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append((self.labels[int(d[0])], float(d[1]),
                               (d[2], d[3], d[4], d[5])))
        self.fps.update()
        return detections

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"],
                                    tensor_input)
        self.interpreter.invoke()
        boxes = np.squeeze(
            self.interpreter.get_tensor(
                self.tensor_output_details[0]["index"]))
        label_codes = np.squeeze(
            self.interpreter.get_tensor(
                self.tensor_output_details[1]["index"]))
        scores = np.squeeze(
            self.interpreter.get_tensor(
                self.tensor_output_details[2]["index"]))

        detections = np.zeros((20, 6), np.float32)
        for i, score in enumerate(scores):
            detections[i] = [
                label_codes[i],
                score,
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
Example #20
def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager, 
    frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):

    frame_size = frame_shape[0] * frame_shape[1]
    frame_rate = EventsPerSecond()
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()

        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
        frame_buffer = frame_manager.create(frame_name, frame_size)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception as e:
            logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")

            if ffmpeg_process.poll() is not None:
                logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
                frame_manager.delete(frame_name)
                break
            continue

        frame_rate.update()

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # close the frame
        frame_manager.close(frame_name)

        # add to the queue
        frame_queue.put(current_frame.value)
Example #21
def track_camera(name, camera, ffmpeg_global_config, global_objects_config,
                 detection_queue, detected_objects_queue, fps, skipped_fps,
                 detection_fps):
    info(f"Starting process for {name}: {os.getpid()}")
    # info("name={} config:{} ffmpeg_global_config={} global_objects_config={}  fps={}  skipped_fps={} detection_fps={} ".format(name, config, ffmpeg_global_config, global_objects_config, fps, skipped_fps, detection_fps))
    # Merge the ffmpeg config with the global config
    config = camera.camera_conf
    ffmpeg = config.get('ffmpeg', {})
    # info(ffmpeg)
    # ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
    ffmpeg_input = camera.live_address
    # info(camera.live_address)
    # info("6666666")
    ffmpeg_global_args = ffmpeg.get('global_args',
                                    ffmpeg_global_config['global_args'])
    ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args',
                                     ffmpeg_global_config['hwaccel_args'])
    ffmpeg_input_args = ffmpeg.get('input_args',
                                   ffmpeg_global_config['input_args'])
    ffmpeg_output_args = ffmpeg.get('output_args',
                                    ffmpeg_global_config['output_args'])
    ffmpeg_cmd = (['ffmpeg'] + ffmpeg_global_args + ffmpeg_hwaccel_args +
                  ffmpeg_input_args + ['-i', ffmpeg_input] +
                  ffmpeg_output_args + ['pipe:'])

    # info(ffmpeg_cmd)

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(),
                                      camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {
            **global_object_filters.get(obj, {}),
            **camera_object_filters.get(obj, {})
        }

    expected_fps = config['fps']
    take_frame = config.get('take_frame', 1)

    if 'width' in config and 'height' in config:
        frame_shape = (config['height'], config['width'], 3)
    else:

        frame_shape = get_frame_shape(ffmpeg_input)
        info(frame_shape)

    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]

    try:
        sa.delete(name)
    except Exception:
        pass

    frame = sa.create(name, shape=frame_shape, dtype=np.uint8)

    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']),
                          cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None:
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue)

    object_tracker = ObjectTracker(10)

    ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)

    plasma_client = plasma.connect("/tmp/plasma")
    frame_num = 0
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    skipped_fps_tracker = EventsPerSecond()
    fps_tracker.start()
    skipped_fps_tracker.start()
    object_detector.fps.start()
    while True:
        start = datetime.datetime.now().timestamp()
        frame_bytes = ffmpeg_process.stdout.read(frame_size)
        duration = datetime.datetime.now().timestamp() - start
        avg_wait = (avg_wait * 99 + duration) / 100

        if not frame_bytes:
            rc = ffmpeg_process.poll()
            if rc is not None:
                info(f"{name}: ffmpeg_process exited unexpectedly with {rc}")
                ffmpeg_process = start_or_restart_ffmpeg(
                    ffmpeg_cmd, frame_size, ffmpeg_process)
                time.sleep(10)
            else:
                error(
                    f"{name}: ffmpeg_process is still running but didnt return any bytes"
                )
            continue

        # limit frame rate
        frame_num += 1
        if (frame_num % take_frame) != 0:
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()

        frame_time = datetime.datetime.now().timestamp()

        # Store frame in numpy array
        frame[:] = (np.frombuffer(frame_bytes, np.uint8).reshape(frame_shape))

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        # skip object detection if we are below the min_fps and wait time is less than half the average
        if frame_num > 100 and fps.value < expected_fps - 1 and duration < 0.5 * avg_wait:
            skipped_fps_tracker.update()
            skipped_fps.value = skipped_fps_tracker.eps()
            continue

        skipped_fps.value = skipped_fps_tracker.eps()

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if area(intersection(obj['box'],
                                     motion_box)) / area(motion_box) > .5:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(
            0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [
            calculate_region(frame_shape, motion_boxes[i][0],
                             motion_boxes[i][1], motion_boxes[i][2],
                             motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes
        ]

        # compute tracked object regions
        object_regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest
        ]

        # merge regions with high IOU
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a],
                                              merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(
                    calculate_region(frame_shape, min(a[0], b[0]),
                                     min(a[1], b[1]), max(a[2], b[2]),
                                     max(a[3], b[3]), 1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break

        # resize regions and detect
        detections = []
        for region in merged_regions:

            tensor_input = create_tensor_input(frame, region)

            region_detections = object_detector.detect(tensor_input)

            for d in region_detections:
                box = d[2]
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                       (x_max - x_min) * (y_max - y_min), region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)

        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):  #obj['clipped']:
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(
                            tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                                   (x_max - x_min) * (y_max - y_min), region)
                            if filtered(det, objects_to_track, object_filters,
                                        mask):
                                continue
                            selected_objects.append(det)

                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # put the frame in the plasma store
        object_id = hashlib.sha1(str.encode(f"{name}{frame_time}")).digest()
        plasma_client.put(frame, plasma.ObjectID(object_id))
        # add to the queue
        detected_objects_queue.put(
            (name, frame_time, object_tracker.tracked_objects))

    info(f"{name}: exiting subprocess")
Example #22
def main():
    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print("MQTT Server unavailable")
            elif rc == 4:
                print("MQTT Bad username or password")
            elif rc == 5:
                print("MQTT Not authorized")
            else:
                print(
                    "Unable to connect to MQTT: Connection refused. Error code: "
                    + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX + '/available', 'online', retain=True)

    client = mqtt.Client(client_id=MQTT_CLIENT_ID)
    client.on_connect = on_connect
    client.will_set(MQTT_TOPIC_PREFIX + '/available',
                    payload='offline',
                    qos=1,
                    retain=True)
    if MQTT_USER is not None:
        client.username_pw_set(MQTT_USER, password=MQTT_PASS)
    client.connect(MQTT_HOST, MQTT_PORT, 60)
    client.loop_start()

    plasma_process = start_plasma_store()

    ##
    # Setup config defaults for cameras
    ##
    for name, config in CONFIG['cameras'].items():
        config['snapshots'] = {
            'show_timestamp':
            config.get('snapshots', {}).get('show_timestamp', True)
        }

    # Queue for cameras to push tracked objects to
    tracked_objects_queue = mp.SimpleQueue()

    # Queue for clip processing
    event_queue = mp.Queue()

    # Start the shared tflite process
    tflite_process = EdgeTPUProcess()

    # start the camera processes
    camera_processes = {}
    for name, config in CONFIG['cameras'].items():
        # Merge the ffmpeg config with the global config
        ffmpeg = config.get('ffmpeg', {})
        ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
        ffmpeg_global_args = ffmpeg.get('global_args',
                                        FFMPEG_DEFAULT_CONFIG['global_args'])
        ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args',
                                         FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
        ffmpeg_input_args = ffmpeg.get('input_args',
                                       FFMPEG_DEFAULT_CONFIG['input_args'])
        ffmpeg_output_args = ffmpeg.get('output_args',
                                        FFMPEG_DEFAULT_CONFIG['output_args'])
        if config.get('save_clips', {}).get('enabled', False):
            ffmpeg_output_args = [
                "-f", "segment", "-segment_time", "10", "-segment_format",
                "mp4", "-reset_timestamps", "1", "-strftime", "1", "-c",
                "copy", "-an", "-map", "0", f"/cache/{name}-%Y%m%d%H%M%S.mp4"
            ] + ffmpeg_output_args
        ffmpeg_cmd = (['ffmpeg'] + ffmpeg_global_args + ffmpeg_hwaccel_args +
                      ffmpeg_input_args + ['-i', ffmpeg_input] +
                      ffmpeg_output_args + ['pipe:'])

        if 'width' in config and 'height' in config:
            frame_shape = (config['height'], config['width'], 3)
        else:
            frame_shape = get_frame_shape(ffmpeg_input)

        frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        take_frame = config.get('take_frame', 1)

        detection_frame = mp.Value('d', 0.0)

        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
        frame_queue = mp.SimpleQueue()
        camera_fps = EventsPerSecond()
        camera_fps.start()
        camera_capture = CameraCapture(name, ffmpeg_process, frame_shape,
                                       frame_queue, take_frame, camera_fps,
                                       detection_frame)
        camera_capture.start()

        camera_processes[name] = {
            'camera_fps': camera_fps,
            'take_frame': take_frame,
            'process_fps': mp.Value('d', 0.0),
            'detection_fps': mp.Value('d', 0.0),
            'detection_frame': detection_frame,
            'read_start': mp.Value('d', 0.0),
            'ffmpeg_process': ffmpeg_process,
            'ffmpeg_cmd': ffmpeg_cmd,
            'frame_queue': frame_queue,
            'frame_shape': frame_shape,
            'capture_thread': camera_capture
        }

        camera_process = mp.Process(
            target=track_camera,
            args=(name, config, GLOBAL_OBJECT_CONFIG, frame_queue, frame_shape,
                  tflite_process.detection_queue, tracked_objects_queue,
                  camera_processes[name]['process_fps'],
                  camera_processes[name]['detection_fps'],
                  camera_processes[name]['read_start'],
                  camera_processes[name]['detection_frame']))
        camera_process.daemon = True
        camera_processes[name]['process'] = camera_process

    for name, camera_process in camera_processes.items():
        camera_process['process'].start()
        print(
            f"Camera_process started for {name}: {camera_process['process'].pid}"
        )

    event_processor = EventProcessor(CONFIG['cameras'], camera_processes,
                                     '/cache', '/clips', event_queue)
    event_processor.start()

    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client,
                                              MQTT_TOPIC_PREFIX,
                                              tracked_objects_queue,
                                              event_queue)
    object_processor.start()

    camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'],
                                     tflite_process, tracked_objects_queue,
                                     plasma_process)
    camera_watchdog.start()

    # create a flask app that encodes frames a mjpeg on demand
    app = Flask(__name__)
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/')
    def ishealthy():
        # return a health message
        return "Frigate is running. Alive and healthy!"

    @app.route('/debug/stack')
    def processor_stack():
        frame = sys._current_frames().get(object_processor.ident, None)
        if frame:
            return "<br>".join(traceback.format_stack(frame)), 200
        else:
            return "no frame found", 200

    @app.route('/debug/print_stack')
    def print_stack():
        pid = int(request.args.get('pid', 0))
        if pid == 0:
            return "missing pid", 200
        else:
            os.kill(pid, signal.SIGUSR1)
            return "check logs", 200

    @app.route('/debug/stats')
    def stats():
        stats = {}

        total_detection_fps = 0

        for name, camera_stats in camera_processes.items():
            total_detection_fps += camera_stats['detection_fps'].value
            capture_thread = camera_stats['capture_thread']
            stats[name] = {
                'camera_fps': round(capture_thread.fps.eps(), 2),
                'process_fps': round(camera_stats['process_fps'].value, 2),
                'skipped_fps': round(capture_thread.skipped_fps.eps(), 2),
                'detection_fps': round(camera_stats['detection_fps'].value, 2),
                'read_start': camera_stats['read_start'].value,
                'pid': camera_stats['process'].pid,
                'ffmpeg_pid': camera_stats['ffmpeg_process'].pid,
                'frame_info': {
                    'read':
                    capture_thread.current_frame,
                    'detect':
                    camera_stats['detection_frame'].value,
                    'process':
                    object_processor.camera_data[name]['current_frame_time']
                }
            }

        stats['coral'] = {
            'fps':
            round(total_detection_fps, 2),
            'inference_speed':
            round(tflite_process.avg_inference_speed.value * 1000, 2),
            'detection_start':
            tflite_process.detection_start.value,
            'pid':
            tflite_process.detect_process.pid
        }

        rc = camera_watchdog.plasma_process.poll()
        stats['plasma_store_rc'] = rc

        return jsonify(stats)

    @app.route('/<camera_name>/<label>/best.jpg')
    def best(camera_name, label):
        if camera_name in CONFIG['cameras']:
            best_frame = object_processor.get_best(camera_name, label)
            if best_frame is None:
                best_frame = np.zeros((720, 1280, 3), np.uint8)
            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
            ret, jpg = cv2.imencode('.jpg', best_frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        fps = int(request.args.get('fps', '3'))
        height = int(request.args.get('h', '360'))
        if camera_name in CONFIG['cameras']:
            # return a multipart response
            return Response(
                imagestream(camera_name, fps, height),
                mimetype='multipart/x-mixed-replace; boundary=frame')
        else:
            return "Camera named {} not found".format(camera_name), 404

    def imagestream(camera_name, fps, height):
        while True:
            # max out at specified FPS
            time.sleep(1 / fps)
            frame = object_processor.get_current_frame(camera_name)
            if frame is None:
                frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)

            width = int(height * frame.shape[1] / frame.shape[0])

            frame = cv2.resize(frame,
                               dsize=(width, height),
                               interpolation=cv2.INTER_LINEAR)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() +
                   b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    object_processor.join()

    plasma_process.terminate()
Example #23
def main():
    stop_event = threading.Event()

    # connect to mqtt and setup last will
    def on_connect(client, userdata, flags, rc):
        print("On connect called")
        if rc != 0:
            if rc == 3:
                print("MQTT Server unavailable")
            elif rc == 4:
                print("MQTT Bad username or password")
            elif rc == 5:
                print("MQTT Not authorized")
            else:
                print(
                    "Unable to connect to MQTT: Connection refused. Error code: "
                    + str(rc))
        # publish a message to signal that the service is running
        client.publish(MQTT_TOPIC_PREFIX + '/available', 'online', retain=True)

    client = mqtt.Client(client_id=MQTT_CLIENT_ID)
    client.on_connect = on_connect
    client.will_set(MQTT_TOPIC_PREFIX + '/available',
                    payload='offline',
                    qos=1,
                    retain=True)
    if MQTT_USER is not None:
        client.username_pw_set(MQTT_USER, password=MQTT_PASS)
    client.connect(MQTT_HOST, MQTT_PORT, 60)
    client.loop_start()

    ##
    # Setup config defaults for cameras
    ##
    for name, config in CONFIG['cameras'].items():
        config['snapshots'] = {
            'show_timestamp':
            config.get('snapshots', {}).get('show_timestamp', True),
            'draw_zones':
            config.get('snapshots', {}).get('draw_zones', False),
            'draw_bounding_boxes':
            config.get('snapshots', {}).get('draw_bounding_boxes', True)
        }
        config['zones'] = config.get('zones', {})

    # Queue for cameras to push tracked objects to
    tracked_objects_queue = mp.Queue()

    # Queue for clip processing
    event_queue = mp.Queue()

    # create the detection pipes and shms
    out_events = {}
    camera_shms = []
    for name in CONFIG['cameras'].keys():
        out_events[name] = mp.Event()
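        # shared memory sizing: 300*300*3 bytes holds one 300x300x3 uint8 input
        # tensor for the detector; 20*6*4 bytes holds up to 20 detections of
        # 6 float32 values (label, score, and box coordinates), matching detect_raw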
        shm_in = mp.shared_memory.SharedMemory(name=name,
                                               create=True,
                                               size=300 * 300 * 3)
        shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}",
                                                create=True,
                                                size=20 * 6 * 4)
        camera_shms.append(shm_in)
        camera_shms.append(shm_out)

    detection_queue = mp.Queue()

    detectors = {}
    for name, detector in DETECTORS.items():
        if detector['type'] == 'cpu':
            detectors[name] = EdgeTPUProcess(detection_queue,
                                             out_events=out_events,
                                             tf_device='cpu')
        if detector['type'] == 'edgetpu':
            detectors[name] = EdgeTPUProcess(detection_queue,
                                             out_events=out_events,
                                             tf_device=detector['device'])

    # create the camera processes
    camera_processes = {}
    for name, config in CONFIG['cameras'].items():
        # Merge the ffmpeg config with the global config
        ffmpeg = config.get('ffmpeg', {})
        ffmpeg_input = get_ffmpeg_input(ffmpeg['input'])
        ffmpeg_global_args = ffmpeg.get('global_args',
                                        FFMPEG_DEFAULT_CONFIG['global_args'])
        ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args',
                                         FFMPEG_DEFAULT_CONFIG['hwaccel_args'])
        ffmpeg_input_args = ffmpeg.get('input_args',
                                       FFMPEG_DEFAULT_CONFIG['input_args'])
        ffmpeg_output_args = ffmpeg.get('output_args',
                                        FFMPEG_DEFAULT_CONFIG['output_args'])
        if config.get('fps') is not None:
            ffmpeg_output_args = ["-r", str(config.get('fps'))] + ffmpeg_output_args
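        # when save_clips is enabled, add a second ffmpeg output that writes
        # 10 second mp4 segments (timestamped filenames) into the cache directory
        # while raw frames continue to stream to stdout via 'pipe:'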
        if config.get('save_clips', {}).get('enabled', False):
            ffmpeg_output_args = [
                "-f", "segment", "-segment_time", "10", "-segment_format",
                "mp4", "-reset_timestamps", "1", "-strftime", "1", "-c",
                "copy", "-an", "-map", "0",
                f"{os.path.join(CACHE_DIR, name)}-%Y%m%d%H%M%S.mp4"
            ] + ffmpeg_output_args
        ffmpeg_cmd = (['ffmpeg'] + ffmpeg_global_args + ffmpeg_hwaccel_args +
                      ffmpeg_input_args + ['-i', ffmpeg_input] +
                      ffmpeg_output_args + ['pipe:'])

        if 'width' in config and 'height' in config:
            frame_shape = (config['height'], config['width'], 3)
        else:
            frame_shape = get_frame_shape(ffmpeg_input)

        config['frame_shape'] = frame_shape

        frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        take_frame = config.get('take_frame', 1)

        detection_frame = mp.Value('d', 0.0)

        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)
        frame_queue = mp.Queue(maxsize=2)
        camera_fps = EventsPerSecond()
        camera_fps.start()
        camera_capture = CameraCapture(name, ffmpeg_process, frame_shape,
                                       frame_queue, take_frame, camera_fps,
                                       stop_event)
        camera_capture.start()

        camera_processes[name] = {
            'camera_fps': camera_fps,
            'take_frame': take_frame,
            'process_fps': mp.Value('d', 0.0),
            'detection_fps': mp.Value('d', 0.0),
            'detection_frame': detection_frame,
            'read_start': mp.Value('d', 0.0),
            'ffmpeg_process': ffmpeg_process,
            'ffmpeg_cmd': ffmpeg_cmd,
            'frame_queue': frame_queue,
            'frame_shape': frame_shape,
            'capture_thread': camera_capture
        }

        # merge global object config into camera object config
        camera_objects_config = config.get('objects', {})
        # get objects to track for camera
        objects_to_track = camera_objects_config.get(
            'track', GLOBAL_OBJECT_CONFIG.get('track', ['person']))
        # get object filters
        object_filters = camera_objects_config.get(
            'filters', GLOBAL_OBJECT_CONFIG.get('filters', {}))
        config['objects'] = {
            'track': objects_to_track,
            'filters': object_filters
        }

        camera_process = mp.Process(
            target=track_camera,
            args=(name, config, frame_queue, frame_shape, detection_queue,
                  out_events[name], tracked_objects_queue,
                  camera_processes[name]['process_fps'],
                  camera_processes[name]['detection_fps'],
                  camera_processes[name]['read_start'],
                  camera_processes[name]['detection_frame'], stop_event))
        camera_process.daemon = True
        camera_processes[name]['process'] = camera_process

    # start the camera_processes
    for name, camera_process in camera_processes.items():
        camera_process['process'].start()
        print(
            f"Camera_process started for {name}: {camera_process['process'].pid}"
        )

    event_processor = EventProcessor(CONFIG, camera_processes, CACHE_DIR,
                                     CLIPS_DIR, event_queue, stop_event)
    event_processor.start()

    object_processor = TrackedObjectProcessor(CONFIG['cameras'], client,
                                              MQTT_TOPIC_PREFIX,
                                              tracked_objects_queue,
                                              event_queue, stop_event)
    object_processor.start()

    camera_watchdog = CameraWatchdog(camera_processes, CONFIG['cameras'],
                                     detectors, detection_queue,
                                     tracked_objects_queue, stop_event)
    camera_watchdog.start()

    def receiveSignal(signalNumber, frame):
        print('Received:', signalNumber)
        stop_event.set()
        event_processor.join()
        object_processor.join()
        camera_watchdog.join()
        for camera_name, camera_process in camera_processes.items():
            camera_process['capture_thread'].join()
            # cleanup the frame queue
            while not camera_process['frame_queue'].empty():
                frame_time = camera_process['frame_queue'].get()
                shm = mp.shared_memory.SharedMemory(
                    name=f"{camera_name}{frame_time}")
                shm.close()
                shm.unlink()

        for detector in detectors.values():
            detector.stop()
        for shm in camera_shms:
            shm.close()
            shm.unlink()
        sys.exit()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    # create a flask app that encodes frames as mjpeg on demand
    app = Flask(__name__)
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    @app.route('/')
    def ishealthy():
        # return a health check message
        return "Frigate is running. Alive and healthy!"

    @app.route('/debug/stack')
    def processor_stack():
        frame = sys._current_frames().get(object_processor.ident, None)
        if frame:
            return "<br>".join(traceback.format_stack(frame)), 200
        else:
            return "no frame found", 200

    @app.route('/debug/print_stack')
    def print_stack():
        pid = int(request.args.get('pid', 0))
        if pid == 0:
            return "missing pid", 200
        else:
            os.kill(pid, signal.SIGUSR1)
            return "check logs", 200

    @app.route('/debug/stats')
    def stats():
        stats = {}

        total_detection_fps = 0

        for name, camera_stats in camera_processes.items():
            total_detection_fps += camera_stats['detection_fps'].value
            capture_thread = camera_stats['capture_thread']
            stats[name] = {
                'camera_fps': round(capture_thread.fps.eps(), 2),
                'process_fps': round(camera_stats['process_fps'].value, 2),
                'skipped_fps': round(capture_thread.skipped_fps.eps(), 2),
                'detection_fps': round(camera_stats['detection_fps'].value, 2),
                'read_start': camera_stats['read_start'].value,
                'pid': camera_stats['process'].pid,
                'ffmpeg_pid': camera_stats['ffmpeg_process'].pid,
                'frame_info': {
                    'read':
                    capture_thread.current_frame.value,
                    'detect':
                    camera_stats['detection_frame'].value,
                    'process':
                    object_processor.camera_data[name]['current_frame_time']
                }
            }

        stats['detectors'] = {}
        for name, detector in detectors.items():
            stats['detectors'][name] = {
                'inference_speed':
                round(detector.avg_inference_speed.value * 1000, 2),
                'detection_start':
                detector.detection_start.value,
                'pid':
                detector.detect_process.pid
            }
        stats['detection_fps'] = round(total_detection_fps, 2)

        return jsonify(stats)

    @app.route('/<camera_name>/<label>/best.jpg')
    def best(camera_name, label):
        if camera_name in CONFIG['cameras']:
            best_object = object_processor.get_best(camera_name, label)
            best_frame = best_object.get('frame')
            if best_frame is None:
                best_frame = np.zeros((720, 1280, 3), np.uint8)
            else:
                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)

            crop = bool(request.args.get('crop', 0, type=int))
            if crop:
                region = best_object.get('region', [0, 0, 300, 300])
                best_frame = best_frame[region[1]:region[3],
                                        region[0]:region[2]]

            height = int(request.args.get('h', str(best_frame.shape[0])))
            width = int(height * best_frame.shape[1] / best_frame.shape[0])

            best_frame = cv2.resize(best_frame,
                                    dsize=(width, height),
                                    interpolation=cv2.INTER_AREA)
            ret, jpg = cv2.imencode('.jpg', best_frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>')
    def mjpeg_feed(camera_name):
        fps = int(request.args.get('fps', '3'))
        height = int(request.args.get('h', '360'))
        if camera_name in CONFIG['cameras']:
            # return a multipart response
            return Response(
                imagestream(camera_name, fps, height),
                mimetype='multipart/x-mixed-replace; boundary=frame')
        else:
            return "Camera named {} not found".format(camera_name), 404

    @app.route('/<camera_name>/latest.jpg')
    def latest_frame(camera_name):
        if camera_name in CONFIG['cameras']:
            # return the most recent processed frame for this camera
            frame = object_processor.get_current_frame(camera_name)
            if frame is None:
                frame = np.zeros((720, 1280, 3), np.uint8)

            height = int(request.args.get('h', str(frame.shape[0])))
            width = int(height * frame.shape[1] / frame.shape[0])

            frame = cv2.resize(frame,
                               dsize=(width, height),
                               interpolation=cv2.INTER_AREA)

            ret, jpg = cv2.imencode('.jpg', frame)
            response = make_response(jpg.tobytes())
            response.headers['Content-Type'] = 'image/jpg'
            return response
        else:
            return "Camera named {} not found".format(camera_name), 404

    def imagestream(camera_name, fps, height):
        while True:
            # max out at specified FPS
            time.sleep(1 / fps)
            frame = object_processor.get_current_frame(camera_name, draw=True)
            if frame is None:
                frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)

            width = int(height * frame.shape[1] / frame.shape[0])
            frame = cv2.resize(frame,
                               dsize=(width, height),
                               interpolation=cv2.INTER_LINEAR)

            ret, jpg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() +
                   b'\r\n\r\n')

    app.run(host='0.0.0.0', port=WEB_PORT, debug=False)

    object_processor.join()
Example #24
def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
                 detection_queue, detected_objects_queue, fps, detection_fps,
                 read_start, detection_frame):
    print(f"Starting process for {name}: {os.getpid()}")
    listen()

    detection_frame.value = 0.0

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(),
                                      camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {
            **global_object_filters.get(obj, {}),
            **camera_object_filters.get(obj, {})
        }

    frame = np.zeros(frame_shape, np.uint8)

    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']),
                          cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None:
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue)

    camera_tracker_config = config.get('tracker', {
        "min_hits": 1,
        "max_age": 5,
        "iou_threshold": 0.2
    })
    object_tracker = ObjectTracker(camera_tracker_config["min_hits"],
                                   camera_tracker_config["max_age"],
                                   camera_tracker_config["iou_threshold"])

    plasma_client = PlasmaManager()
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    fps_tracker.start()
    object_detector.fps.start()
    while True:
        read_start.value = datetime.datetime.now().timestamp()
        frame_time = frame_queue.get()
        duration = datetime.datetime.now().timestamp() - read_start.value
        read_start.value = 0.0
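        # exponential moving average of the time spent blocked on the frame queue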
        avg_wait = (avg_wait * 99 + duration) / 100
        detection_frame.value = frame_time

        # Get frame from plasma store
        frame = plasma_client.get(f"{name}{frame_time}")

        if frame is plasma.ObjectNotAvailable:
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if intersection_over_union(motion_box, obj['box']) > .2:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(
            0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [
            calculate_region(frame_shape, motion_boxes[i][0],
                             motion_boxes[i][1], motion_boxes[i][2],
                             motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes
        ]

        # compute tracked object regions
        object_regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest
        ]

        # merge regions with high IOU
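        # greedily merge the pair of regions with the highest IOU until no
        # remaining pair overlaps by more than 0.1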
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a],
                                              merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(
                    calculate_region(frame_shape, min(a[0], b[0]),
                                     min(a[1], b[1]), max(a[2], b[2]),
                                     max(a[3], b[3]), 1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break

        # resize regions and detect
        detections = []
        for region in merged_regions:

            tensor_input = create_tensor_input(frame, region)

            region_detections = object_detector.detect(tensor_input)
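            # detector boxes are normalized to the square region as
            # (ymin, xmin, ymax, xmax); scale by the region size and offset by
            # the region's top-left corner to map back to frame coordinates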

            for d in region_detections:
                box = d[2]
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                       (x_max - x_min) * (y_max - y_min), region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)

        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(
                            tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                                   (x_max - x_min) * (y_max - y_min), region)
                            if filtered(det, objects_to_track, object_filters,
                                        mask):
                                continue
                            selected_objects.append(det)

                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue
        detected_objects_queue.put(
            (name, frame_time, object_tracker.tracked_objects))

    print(f"{name}: exiting subprocess")
Example #25
class LocalObjectDetector(ObjectDetector):
    def __init__(self, tf_device=None, labels=None):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        device_config = {"device": "usb"}
        if tf_device is not None:
            device_config = {"device": tf_device}

        edge_tpu_delegate = None
        try:
            print(f"Attempting to load TPU as {device_config['device']}")
            edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
            print("TPU found")
        except ValueError:
            try:
                print(f"Attempting to load TPU as pci:0")
                edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', {"device": "pci:0"})
                print("PCIe TPU found")
            except ValueError:
                print("No EdgeTPU detected. Falling back to CPU.")
        
        if edge_tpu_delegate is None:
            self.interpreter = tflite.Interpreter(
                model_path='/cpu_model.tflite')
        else:
            self.interpreter = tflite.Interpreter(
                model_path='/edgetpu_model.tflite',
                experimental_delegates=[edge_tpu_delegate])
        
        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()
    
    def detect(self, tensor_input, threshold=.4):
        detections = []

        raw_detections = self.detect_raw(tensor_input)

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append((
                self.labels[int(d[0])],
                float(d[1]),
                (d[2], d[3], d[4], d[5])
            ))
        self.fps.update()
        return detections

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
        self.interpreter.invoke()
        boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
        label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
        scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))

        detections = np.zeros((20,6), np.float32)
        for i, score in enumerate(scores):
            detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
        
        return detections
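
A minimal usage sketch (added here, not part of the original example), assuming the module-level imports shown above (numpy as np and the tflite runtime), a 300x300 SSD-style model that takes a uint8 tensor of shape (1, 300, 300, 3), and the /labelmap.txt label file referenced elsewhere in these examples:

if __name__ == "__main__":
    detector = LocalObjectDetector(labels='/labelmap.txt')
    # a blank test frame; real callers pass a cropped, resized camera region
    tensor_input = np.zeros((1, 300, 300, 3), np.uint8)
    for label, score, box in detector.detect(tensor_input, threshold=0.5):
        print(label, score, box)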
Example #26
File: video.py  Project: yllar/frigate
def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
                   frame_queue, take_frame: int, fps: mp.Value, skipped_fps: mp.Value,
                   stop_event: mp.Event, current_frame: mp.Value):

    frame_num = 0
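    # frames are expected in YUV 4:2:0 (I420) layout, i.e. 1.5 bytes per pixel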
    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
    frame_rate = EventsPerSecond()
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()
        if stop_event.is_set():
            print(f"{camera_name}: stop event set. exiting capture thread...")
            break

        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
        frame_buffer = frame_manager.create(frame_name, frame_size)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception:
            print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")

            if ffmpeg_process.poll() is not None:
                print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
                frame_manager.delete(frame_name)
                break

            continue

        frame_rate.update()
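        # frame decimation: only every take_frame-th frame is forwarded; skipped
        # frames are counted and their shared memory buffers released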

        frame_num += 1
        if (frame_num % take_frame) != 0:
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # close the frame
        frame_manager.close(frame_name)

        # add to the queue
        frame_queue.put(current_frame.value)