Example #1
def run_detector(detection_queue, out_events: Dict[str, mp.Event], avg_speed,
                 start, tf_device):
    print(f"Starting detection process: {os.getpid()}")
    listen()
    frame_manager = SharedMemoryFrameManager()
    object_detector = LocalObjectDetector(tf_device=tf_device)

    outputs = {}
    for name in out_events.keys():
        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}",
                                                create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        outputs[name] = {'shm': out_shm, 'np': out_np}

    while True:
        connection_id = detection_queue.get()
        input_frame = frame_manager.get(connection_id, (1, 300, 300, 3))

        if input_frame is None:
            continue

        # detect and send the output
        start.value = datetime.datetime.now().timestamp()
        detections = object_detector.detect_raw(input_frame)
        duration = datetime.datetime.now().timestamp() - start.value
        outputs[connection_id]['np'][:] = detections[:]
        out_events[connection_id].set()
        start.value = 0.0

        avg_speed.value = (avg_speed.value * 9 + duration) / 10
Example #2
def run_detector(
    name: str,
    detection_queue: mp.Queue,
    out_events: Dict[str, mp.Event],
    avg_speed,
    start,
    model_path,
    model_shape,
    tf_device,
    num_threads,
):
    threading.current_thread().name = f"detector:{name}"
    logger = logging.getLogger(f"detector.{name}")
    logger.info(f"Starting detection process: {os.getpid()}")
    setproctitle(f"frigate.detector.{name}")
    listen()

    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    frame_manager = SharedMemoryFrameManager()
    object_detector = LocalObjectDetector(
        tf_device=tf_device, model_path=model_path, num_threads=num_threads
    )

    outputs = {}
    for name in out_events.keys():
        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        outputs[name] = {"shm": out_shm, "np": out_np}

    while not stop_event.is_set():
        try:
            connection_id = detection_queue.get(timeout=5)
        except queue.Empty:
            continue
        input_frame = frame_manager.get(
            connection_id, (1, model_shape[0], model_shape[1], 3)
        )

        if input_frame is None:
            continue

        # detect and send the output
        start.value = datetime.datetime.now().timestamp()
        detections = object_detector.detect_raw(input_frame)
        duration = datetime.datetime.now().timestamp() - start.value
        outputs[connection_id]["np"][:] = detections[:]
        out_events[connection_id].set()
        start.value = 0.0

        avg_speed.value = (avg_speed.value * 9 + duration) / 10
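
Both versions of run_detector above follow the same handshake: a requester writes an input frame into shared memory, pushes its connection id onto detection_queue, and blocks on its event while the detector fills the out-{name} buffer with a 20x6 float32 result array. Below is a minimal, self-contained sketch of that result channel using only the standard library and numpy; the fake_detector function and the "front_door" name are illustrative stand-ins, not Frigate code.

import multiprocessing as mp
from multiprocessing import shared_memory

import numpy as np


def fake_detector(detection_queue, out_events):
    # Attach to the result buffers created by the requesting side.
    outputs = {}
    for cam in out_events:
        shm = shared_memory.SharedMemory(name=f"out-{cam}", create=False)
        outputs[cam] = {
            "shm": shm,
            "np": np.ndarray((20, 6), dtype=np.float32, buffer=shm.buf),
        }

    # Handle a single request: pretend inference produced one detection row.
    connection_id = detection_queue.get()
    outputs[connection_id]["np"][:] = 0.0
    outputs[connection_id]["np"][0] = [1.0, 0.87, 0.1, 0.1, 0.5, 0.5]
    out_events[connection_id].set()

    for out in outputs.values():
        out["shm"].close()


if __name__ == "__main__":
    cam = "front_door"
    event = mp.Event()

    # The requester owns the out-{name} buffer: 20 rows of 6 float32 values.
    shm = shared_memory.SharedMemory(name=f"out-{cam}", create=True, size=20 * 6 * 4)
    result = np.ndarray((20, 6), dtype=np.float32, buffer=shm.buf)

    detection_queue = mp.Queue()
    detector = mp.Process(target=fake_detector, args=(detection_queue, {cam: event}))
    detector.start()

    detection_queue.put(cam)  # request a detection
    event.wait()              # detector signals that the result buffer is filled
    print(result[0])          # first result row written by the detector

    detector.join()
    shm.close()
    shm.unlink()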
Example #3
    def __init__(self, clip_path, frame_shape, config: FrigateConfig):
        self.clip_path = clip_path
        self.camera_name = 'camera'
        self.config = config
        self.camera_config = self.config.cameras['camera']
        self.frame_shape = self.camera_config.frame_shape
        self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
        self.frame_manager = SharedMemoryFrameManager()
        self.frame_queue = mp.Queue()
        self.detected_objects_queue = mp.Queue()
        self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
Example #4
def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
    stop_event = mp.Event()
    def receiveSignal(signalNumber, frame):
        stop_event.set()
    
    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"process:{name}"
    setproctitle(f"frigate.process:{name}")
    listen()

    frame_queue = process_info['frame_queue']
    detection_enabled = process_info['detection_enabled']

    frame_shape = config.frame_shape
    objects_to_track = config.objects.track
    object_filters = config.objects.filters

    motion_detector = MotionDetector(frame_shape, config.motion)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)

    object_tracker = ObjectTracker(config.detect)

    frame_manager = SharedMemoryFrameManager()

    process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)

    logger.info(f"{name}: exiting subprocess")
Example #5
    def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
        threading.Thread.__init__(self)
        self.name = f"capture:{camera_name}"
        self.camera_name = camera_name
        self.frame_shape = frame_shape
        self.frame_queue = frame_queue
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value('d', 0.0)
        self.last_frame = 0
Example #6
def track_camera(name, config, frame_queue, frame_shape, detection_queue,
                 result_connection, detected_objects_queue, fps, detection_fps,
                 read_start, detection_frame, stop_event):
    print(f"Starting process for {name}: {os.getpid()}")
    listen()

    detection_frame.value = 0.0

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    objects_to_track = camera_objects_config.get('track', [])
    object_filters = camera_objects_config.get('filters', {})

    # load in the mask for object detection
    if 'mask' in config:
        if config['mask'].startswith('base64,'):
            img = base64.b64decode(config['mask'][7:])
            npimg = np.frombuffer(img, dtype=np.uint8)
            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
        elif config['mask'].startswith('poly,'):
            points = config['mask'].split(',')[1:]
            contour = np.array([[int(points[i]),
                                 int(points[i + 1])]
                                for i in range(0, len(points), 2)])
            mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
            mask[:] = 255
            cv2.fillPoly(mask, pts=[contour], color=(0))
        else:
            mask = cv2.imread("/config/{}".format(config['mask']),
                              cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None or mask.size == 0:
        mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue, result_connection)

    object_tracker = ObjectTracker(10)

    frame_manager = SharedMemoryFrameManager()

    process_frames(name, frame_queue, frame_shape, frame_manager,
                   motion_detector, object_detector, object_tracker,
                   detected_objects_queue, fps, detection_fps, detection_frame,
                   objects_to_track, object_filters, mask, stop_event)

    print(f"{name}: exiting subprocess")
Example #7
    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, stop_event):
        threading.Thread.__init__(self)
        self.name = name
        self.frame_shape = frame_shape
        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        self.frame_queue = frame_queue
        self.take_frame = take_frame
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value('d', 0.0)
        self.last_frame = 0
        self.stop_event = stop_event
Example #8
class ProcessClip:
    def __init__(self, clip_path, frame_shape, config: FrigateConfig):
        self.clip_path = clip_path
        self.camera_name = "camera"
        self.config = config
        self.camera_config = self.config.cameras["camera"]
        self.frame_shape = self.camera_config.frame_shape
        self.ffmpeg_cmd = [
            c["cmd"] for c in self.camera_config.ffmpeg_cmds
            if "detect" in c["roles"]
        ][0]
        self.frame_manager = SharedMemoryFrameManager()
        self.frame_queue = mp.Queue()
        self.detected_objects_queue = mp.Queue()
        self.camera_state = CameraState(self.camera_name, config,
                                        self.frame_manager)

    def load_frames(self):
        fps = EventsPerSecond()
        skipped_fps = EventsPerSecond()
        current_frame = mp.Value("d", 0.0)
        frame_size = (self.camera_config.frame_shape_yuv[0] *
                      self.camera_config.frame_shape_yuv[1])
        ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger,
                                                 sp.DEVNULL, frame_size)
        capture_frames(
            ffmpeg_process,
            self.camera_name,
            self.camera_config.frame_shape_yuv,
            self.frame_manager,
            self.frame_queue,
            fps,
            skipped_fps,
            current_frame,
        )
        ffmpeg_process.wait()
        ffmpeg_process.communicate()

    def process_frames(self, objects_to_track=["person"], object_filters={}):
        mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1),
                        np.uint8)
        mask[:] = 255
        motion_detector = MotionDetector(self.frame_shape, mask,
                                         self.camera_config.motion)

        object_detector = LocalObjectDetector(labels="/labelmap.txt")
        object_tracker = ObjectTracker(self.camera_config.detect)
        process_info = {
            "process_fps": mp.Value("d", 0.0),
            "detection_fps": mp.Value("d", 0.0),
            "detection_frame": mp.Value("d", 0.0),
        }
        stop_event = mp.Event()
        model_shape = (self.config.model.height, self.config.model.width)

        process_frames(
            self.camera_name,
            self.frame_queue,
            self.frame_shape,
            model_shape,
            self.frame_manager,
            motion_detector,
            object_detector,
            object_tracker,
            self.detected_objects_queue,
            process_info,
            objects_to_track,
            object_filters,
            mask,
            stop_event,
            exit_on_empty=True,
        )

    def top_object(self, debug_path=None):
        obj_detected = False
        top_computed_score = 0.0

        def handle_event(name, obj, frame_time):
            nonlocal obj_detected
            nonlocal top_computed_score
            if obj.computed_score > top_computed_score:
                top_computed_score = obj.computed_score
            if not obj.false_positive:
                obj_detected = True

        self.camera_state.on("new", handle_event)
        self.camera_state.on("update", handle_event)

        while not self.detected_objects_queue.empty():
            (
                camera_name,
                frame_time,
                current_tracked_objects,
                motion_boxes,
                regions,
            ) = self.detected_objects_queue.get()
            if debug_path is not None:
                self.save_debug_frame(debug_path, frame_time,
                                      current_tracked_objects.values())

            self.camera_state.update(frame_time, current_tracked_objects,
                                     motion_boxes, regions)

        self.frame_manager.delete(self.camera_state.previous_frame_id)

        return {
            "object_detected": obj_detected,
            "top_score": top_computed_score
        }

    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        current_frame = cv2.cvtColor(
            self.frame_manager.get(f"{self.camera_name}{frame_time}",
                                   self.camera_config.frame_shape_yuv),
            cv2.COLOR_YUV2BGR_I420,
        )
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            thickness = 2
            color = (0, 0, 175)

            if obj["frame_time"] != frame_time:
                thickness = 1
                color = (255, 0, 0)
            else:
                color = (255, 255, 0)

            # draw the bounding boxes on the frame
            box = obj["box"]
            draw_box_with_label(
                current_frame,
                box[0],
                box[1],
                box[2],
                box[3],
                obj["id"],
                f"{int(obj['score']*100)}% {int(obj['area'])}",
                thickness=thickness,
                color=color,
            )
            # draw the regions on the frame
            region = obj["region"]
            draw_box_with_label(
                current_frame,
                region[0],
                region[1],
                region[2],
                region[3],
                "region",
                "",
                thickness=1,
                color=(0, 255, 0),
            )

        cv2.imwrite(
            f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
            current_frame,
        )
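
ProcessClip in Example #8 strings the pieces together for offline testing: decode a clip into the shared-memory frame manager, run motion and object detection over it, then replay the tracked objects through CameraState. The following is a hypothetical driver using only the methods shown above; building the FrigateConfig (and choosing a real clip path) is assumed to happen elsewhere.

def top_object_for_clip(clip_path, frame_shape, config):
    # clip_path and frame_shape are placeholders supplied by the caller.
    clip = ProcessClip(clip_path, frame_shape, config)
    clip.load_frames()                                # run ffmpeg, fill shared memory
    clip.process_frames(objects_to_track=["person"])  # motion + detection + tracking
    return clip.top_object(debug_path=None)           # {'object_detected': ..., 'top_score': ...}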
Example #9
    def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
        threading.Thread.__init__(self)
        self.camera_config = camera_config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.stop_event = stop_event
        self.camera_states: Dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()

        def start(camera, obj):
            # publish events to mqtt
            self.client.publish(f"{self.topic_prefix}/{camera}/events/start", json.dumps(obj), retain=False)
            self.event_queue.put(('start', camera, obj))

        def update(camera, obj):
            pass

        def end(camera, obj):
            self.client.publish(f"{self.topic_prefix}/{camera}/events/end", json.dumps(obj), retain=False)
            self.event_queue.put(('end', camera, obj))
        
        def snapshot(camera, obj):
            if 'frame' not in obj:
                return
            
            best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
            if self.camera_config[camera]['snapshots']['draw_bounding_boxes']:
                thickness = 2
                color = COLOR_MAP[obj['label']]
                box = obj['box']
                draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
                
            mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
            if mqtt_config.get('crop_to_region'):
                region = obj['region']
                best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
            if 'snapshot_height' in mqtt_config: 
                height = int(mqtt_config['snapshot_height'])
                width = int(height*best_frame.shape[1]/best_frame.shape[0])
                best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
            
            if self.camera_config[camera]['snapshots']['show_timestamp']:
                time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
                size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
                text_width = size[0][0]
                text_height = size[0][1]
                desired_size = max(200, 0.33*best_frame.shape[1])
                font_scale = desired_size/text_width
                cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, color=(255, 255, 255), thickness=2)

            ret, jpg = cv2.imencode('.jpg', best_frame)
            if ret:
                jpg_bytes = jpg.tobytes()
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot", jpg_bytes, retain=True)
        
        def object_status(camera, object_name, status):
            self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)

        for camera in self.camera_config.keys():
            camera_state = CameraState(camera, self.camera_config[camera], self.frame_manager)
            camera_state.on('start', start)
            camera_state.on('update', update)
            camera_state.on('end', end)
            camera_state.on('snapshot', snapshot)
            camera_state.on('object_status', object_status)
            self.camera_states[camera] = camera_state

        self.camera_data = defaultdict(lambda: {
            'best_objects': {},
            'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
            'tracked_objects': {},
            'current_frame': np.zeros((720,1280,3), np.uint8),
            'current_frame_time': 0.0,
            'object_id': None
        })
        # {
        #   'zone_name': {
        #       'person': ['camera_1', 'camera_2']
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))

        # set colors for zones
        all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])
        zone_colors = {}
        colors = plt.cm.get_cmap('tab10', len(all_zone_names))
        for i, zone in enumerate(all_zone_names):
            zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])

        # create zone contours
        for camera_name, camera_config in self.camera_config.items():
            for zone_name, zone_config in camera_config['zones'].items():
                zone_config['color'] = zone_colors[zone_name]
                coordinates = zone_config['coordinates']
                if isinstance(coordinates, list):
                    zone_config['contour'] =  np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
                elif isinstance(coordinates, str):
                    points = coordinates.split(',')
                    zone_config['contour'] =  np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
                else:
                    print(f"Unable to parse zone coordinates for {zone_name} - {camera}")
Example #10
def output_frames(config: FrigateConfig, video_output_queue):
    threading.current_thread().name = "output"
    setproctitle("frigate.output")

    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    frame_manager = SharedMemoryFrameManager()
    previous_frames = {}

    # start a websocket server on 8082
    WebSocketWSGIHandler.http_version = "1.1"
    websocket_server = make_server(
        "127.0.0.1",
        8082,
        server_class=WSGIServer,
        handler_class=WebSocketWSGIRequestHandler,
        app=WebSocketWSGIApplication(handler_cls=WebSocket),
    )
    websocket_server.initialize_websockets_manager()
    websocket_thread = threading.Thread(target=websocket_server.serve_forever)

    converters = {}
    broadcasters = {}

    for camera, cam_config in config.cameras.items():
        width = int(cam_config.live.height *
                    (cam_config.frame_shape[1] / cam_config.frame_shape[0]))
        converters[camera] = FFMpegConverter(
            cam_config.frame_shape[1],
            cam_config.frame_shape[0],
            width,
            cam_config.live.height,
            cam_config.live.quality,
        )
        broadcasters[camera] = BroadcastThread(camera, converters[camera],
                                               websocket_server)

    if config.birdseye.enabled:
        converters["birdseye"] = FFMpegConverter(
            config.birdseye.width,
            config.birdseye.height,
            config.birdseye.width,
            config.birdseye.height,
            config.birdseye.quality,
        )
        broadcasters["birdseye"] = BroadcastThread("birdseye",
                                                   converters["birdseye"],
                                                   websocket_server)

    websocket_thread.start()

    for t in broadcasters.values():
        t.start()

    birdseye_manager = BirdsEyeFrameManager(config, frame_manager)

    while not stop_event.is_set():
        try:
            (
                camera,
                frame_time,
                current_tracked_objects,
                motion_boxes,
                regions,
            ) = video_output_queue.get(True, 10)
        except queue.Empty:
            continue

        frame_id = f"{camera}{frame_time}"

        frame = frame_manager.get(frame_id,
                                  config.cameras[camera].frame_shape_yuv)

        # send camera frame to ffmpeg process if websockets are connected
        if any(ws.environ["PATH_INFO"].endswith(camera)
               for ws in websocket_server.manager):
            # write to the converter for the camera if clients are listening to the specific camera
            converters[camera].write(frame.tobytes())

        # update birdseye if websockets are connected
        if config.birdseye.enabled and any(
                ws.environ["PATH_INFO"].endswith("birdseye")
                for ws in websocket_server.manager):
            if birdseye_manager.update(
                    camera,
                    len(current_tracked_objects),
                    len(motion_boxes),
                    frame_time,
                    frame,
            ):
                converters["birdseye"].write(birdseye_manager.frame.tobytes())

        if camera in previous_frames:
            frame_manager.delete(f"{camera}{previous_frames[camera]}")

        previous_frames[camera] = frame_time

    while not video_output_queue.empty():
        (
            camera,
            frame_time,
            current_tracked_objects,
            motion_boxes,
            regions,
        ) = video_output_queue.get(True, 10)

        frame_id = f"{camera}{frame_time}"
        frame = frame_manager.get(frame_id,
                                  config.cameras[camera].frame_shape_yuv)
        frame_manager.delete(frame_id)

    for c in converters.values():
        c.exit()
    for b in broadcasters.values():
        b.join()
    websocket_server.manager.close_all()
    websocket_server.manager.stop()
    websocket_server.manager.join()
    websocket_server.shutdown()
    websocket_thread.join()
    logger.info("exiting output process...")
Example #11
    def __init__(
        self,
        config: FrigateConfig,
        client,
        topic_prefix,
        tracked_objects_queue,
        event_queue,
        event_processed_queue,
        stop_event,
    ):
        threading.Thread.__init__(self)
        self.name = "detected_frames_processor"
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.event_processed_queue = event_processed_queue
        self.stop_event = stop_event
        self.camera_states: Dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_queue.put(("start", camera, obj.to_dict()))

        def update(camera, obj: TrackedObject, current_frame_time):
            after = obj.to_dict()
            message = {
                "before": obj.previous,
                "after": after,
                "type": "new" if obj.previous["false_positive"] else "update",
            }
            self.client.publish(f"{self.topic_prefix}/events",
                                json.dumps(message),
                                retain=False)
            obj.previous = after

        def end(camera, obj: TrackedObject, current_frame_time):
            snapshot_config = self.config.cameras[camera].snapshots
            event_data = obj.to_dict(include_thumbnail=True)
            event_data["has_snapshot"] = False
            if not obj.false_positive:
                message = {
                    "before": obj.previous,
                    "after": obj.to_dict(),
                    "type": "end",
                }
                self.client.publish(f"{self.topic_prefix}/events",
                                    json.dumps(message),
                                    retain=False)
                # write snapshot to disk if enabled
                if snapshot_config.enabled and self.should_save_snapshot(
                        camera, obj):
                    jpg_bytes = obj.get_jpg_bytes(
                        timestamp=snapshot_config.timestamp,
                        bounding_box=snapshot_config.bounding_box,
                        crop=snapshot_config.crop,
                        height=snapshot_config.height,
                    )
                    if jpg_bytes is None:
                        logger.warning(
                            f"Unable to save snapshot for {obj.obj_data['id']}."
                        )
                    else:
                        with open(
                                os.path.join(
                                    CLIPS_DIR,
                                    f"{camera}-{obj.obj_data['id']}.jpg"),
                                "wb",
                        ) as j:
                            j.write(jpg_bytes)
                        event_data["has_snapshot"] = True
            self.event_queue.put(("end", camera, event_data))

        def snapshot(camera, obj: TrackedObject, current_frame_time):
            mqtt_config = self.config.cameras[camera].mqtt
            if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
                jpg_bytes = obj.get_jpg_bytes(
                    timestamp=mqtt_config.timestamp,
                    bounding_box=mqtt_config.bounding_box,
                    crop=mqtt_config.crop,
                    height=mqtt_config.height,
                )

                if jpg_bytes is None:
                    logger.warning(
                        f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
                    )
                else:
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot",
                        jpg_bytes,
                        retain=True,
                    )

        def object_status(camera, object_name, status):
            self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}",
                                status,
                                retain=False)

        for camera in self.config.cameras.keys():
            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state.on("start", start)
            camera_state.on("update", update)
            camera_state.on("end", end)
            camera_state.on("snapshot", snapshot)
            camera_state.on("object_status", object_status)
            self.camera_states[camera] = camera_state

        # {
        #   'zone_name': {
        #       'person': {
        #           'camera_1': 2,
        #           'camera_2': 1
        #       }
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(dict))
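
Examples #9, #11 and #12 all register per-camera callbacks through CameraState.on(...) and let the state object call them back as tracked objects start, update and end. The dispatch itself is simple; here is a minimal stand-in for that pattern (this is not Frigate's CameraState, only an illustration of the on()/emit contract the constructors above rely on).

from collections import defaultdict


class TinyCameraState:
    """Minimal stand-in for the on()/callback pattern used above."""

    def __init__(self, name):
        self.name = name
        self.callbacks = defaultdict(list)

    def on(self, event_type, callback):
        self.callbacks[event_type].append(callback)

    def emit(self, event_type, obj, frame_time):
        # CameraState invokes the registered callbacks as objects change state.
        for callback in self.callbacks[event_type]:
            callback(self.name, obj, frame_time)


state = TinyCameraState("front_door")
state.on("start", lambda camera, obj, t: print(f"{camera}: started {obj['id']}"))
state.on("end", lambda camera, obj, t: print(f"{camera}: ended {obj['id']}"))

state.emit("start", {"id": "abc123", "label": "person"}, 1600000000.0)
state.emit("end", {"id": "abc123", "label": "person"}, 1600000042.5)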
Example #12
    def __init__(self, config: FrigateConfig, client, topic_prefix,
                 tracked_objects_queue, event_queue, event_processed_queue,
                 stop_event):
        threading.Thread.__init__(self)
        self.name = "detected_frames_processor"
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.event_processed_queue = event_processed_queue
        self.stop_event = stop_event
        self.camera_states: Dict[str, CameraState] = {}
        self.frame_manager = SharedMemoryFrameManager()

        def start(camera, obj: TrackedObject, current_frame_time):
            self.event_queue.put(('start', camera, obj.to_dict()))

        def update(camera, obj: TrackedObject, current_frame_time):
            after = obj.to_dict()
            message = {
                'before': obj.previous,
                'after': after,
                'type': 'new' if obj.previous['false_positive'] else 'update'
            }
            self.client.publish(f"{self.topic_prefix}/events",
                                json.dumps(message),
                                retain=False)
            obj.previous = after

        def end(camera, obj: TrackedObject, current_frame_time):
            snapshot_config = self.config.cameras[camera].snapshots
            event_data = obj.to_dict(include_thumbnail=True)
            event_data['has_snapshot'] = False
            if not obj.false_positive:
                message = {
                    'before': obj.previous,
                    'after': obj.to_dict(),
                    'type': 'end'
                }
                self.client.publish(f"{self.topic_prefix}/events",
                                    json.dumps(message),
                                    retain=False)
                # write snapshot to disk if enabled
                if snapshot_config.enabled:
                    jpg_bytes = obj.get_jpg_bytes(
                        timestamp=snapshot_config.timestamp,
                        bounding_box=snapshot_config.bounding_box,
                        crop=snapshot_config.crop,
                        height=snapshot_config.height)
                    with open(
                            os.path.join(CLIPS_DIR,
                                         f"{camera}-{obj.obj_data['id']}.jpg"),
                            'wb') as j:
                        j.write(jpg_bytes)
                    event_data['has_snapshot'] = True
            self.event_queue.put(('end', camera, event_data))

        def snapshot(camera, obj: TrackedObject, current_frame_time):
            mqtt_config = self.config.cameras[camera].mqtt
            if mqtt_config.enabled:
                jpg_bytes = obj.get_jpg_bytes(
                    timestamp=mqtt_config.timestamp,
                    bounding_box=mqtt_config.bounding_box,
                    crop=mqtt_config.crop,
                    height=mqtt_config.height)
                self.client.publish(
                    f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot",
                    jpg_bytes,
                    retain=True)

        def object_status(camera, object_name, status):
            self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}",
                                status,
                                retain=False)

        for camera in self.config.cameras.keys():
            camera_state = CameraState(camera, self.config, self.frame_manager)
            camera_state.on('start', start)
            camera_state.on('update', update)
            camera_state.on('end', end)
            camera_state.on('snapshot', snapshot)
            camera_state.on('object_status', object_status)
            self.camera_states[camera] = camera_state

        # {
        #   'zone_name': {
        #       'person': {
        #           'camera_1': 2,
        #           'camera_2': 1
        #       }
        #   }
        # }
        self.zone_data = defaultdict(lambda: defaultdict(lambda: {}))