Code Example #1
class CameraCapture(threading.Thread):
    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue,
                 take_frame, fps, detection_frame):
        threading.Thread.__init__(self)
        self.name = name
        self.frame_shape = frame_shape
        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        self.frame_queue = frame_queue
        self.take_frame = take_frame
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.plasma_client = PlasmaManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = 0
        self.last_frame = 0
        self.detection_frame = detection_frame

    def run(self):
        frame_num = 0
        self.skipped_fps.start()
        while True:
            if self.ffmpeg_process.poll() is not None:
                print(
                    f"{self.name}: ffmpeg process is not running. exiting capture thread..."
                )
                break

            frame_bytes = self.ffmpeg_process.stdout.read(self.frame_size)
            self.current_frame = datetime.datetime.now().timestamp()

            if len(frame_bytes) == 0:
                print(
                    f"{self.name}: ffmpeg didnt return a frame. something is wrong."
                )
                continue

            self.fps.update()

            frame_num += 1
            if (frame_num % self.take_frame) != 0:
                self.skipped_fps.update()
                continue

            # if the detection process is more than 1 second behind, skip this frame
            if self.detection_frame.value > 0.0 and (
                    self.last_frame - self.detection_frame.value) > 1:
                self.skipped_fps.update()
                continue

            # put the frame in the plasma store
            self.plasma_client.put(
                f"{self.name}{self.current_frame}",
                np.frombuffer(frame_bytes, np.uint8).reshape(self.frame_shape))
            # add to the queue
            self.frame_queue.put(self.current_frame)
            self.last_frame = self.current_frame
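
These capture examples hand frames to downstream consumers through a PlasmaManager keyed by strings such as f"{self.name}{self.current_frame}". The wrapper itself is not listed on this page; below is a minimal sketch of what a string-keyed wrapper over pyarrow.plasma could look like, inferred only from the put/get/delete calls visible in these examples. The socket path and the SHA-1 keying are assumptions, not the project's confirmed implementation.

import hashlib

import pyarrow.plasma as plasma


class PlasmaManagerSketch:
    """Hypothetical string-keyed wrapper around a plasma store client."""

    def __init__(self, socket_path="/tmp/plasma"):
        # connect to an already-running plasma_store process
        self.client = plasma.connect(socket_path)

    def _object_id(self, name):
        # plasma object IDs are exactly 20 bytes; a SHA-1 digest of the
        # string key satisfies that requirement
        return plasma.ObjectID(hashlib.sha1(name.encode()).digest())

    def put(self, name, obj):
        self.client.put(obj, self._object_id(name))

    def get(self, name, timeout_ms=1000):
        # returns plasma.ObjectNotAvailable on timeout, the same sentinel
        # the examples on this page compare against
        return self.client.get(self._object_id(name), timeout_ms=timeout_ms)

    def delete(self, name):
        self.client.delete([self._object_id(name)])
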
Code Example #2
    def __init__(self, camera_config, zone_config, client, topic_prefix,
                 tracked_objects_queue, event_queue, stop_event):
        threading.Thread.__init__(self)
        self.camera_config = camera_config
        self.zone_config = zone_config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.stop_event = stop_event
        self.camera_data = defaultdict(
            lambda: {
                'best_objects': {},
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'tracked_objects': {},
                'current_frame': np.zeros((720, 1280, 3), np.uint8),
                'current_frame_time': 0.0,
                'object_id': None
            })
        self.zone_data = defaultdict(
            lambda: {
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'contours': {}
            })

        # create zone contours
        for name, config in zone_config.items():
            for camera, camera_zone_config in config.items():
                coordinates = camera_zone_config['coordinates']
                if isinstance(coordinates, list):
                    self.zone_data[name]['contours'][camera] = np.array(
                        [[int(p.split(',')[0]),
                          int(p.split(',')[1])] for p in coordinates])
                elif isinstance(coordinates, str):
                    points = coordinates.split(',')
                    self.zone_data[name]['contours'][camera] = np.array(
                        [[int(points[i]), int(points[i + 1])]
                         for i in range(0, len(points), 2)])
                else:
                    print(
                        f"Unable to parse zone coordinates for {name} - {camera}"
                    )

        # set colors for zones
        colors = plt.cm.get_cmap('tab10', len(self.zone_data.keys()))
        for i, zone in enumerate(self.zone_data.values()):
            zone['color'] = tuple(int(round(255 * c)) for c in colors(i)[:3])

        self.plasma_client = PlasmaManager(self.stop_event)
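
The zone-color loop above samples evenly spaced colors from matplotlib's tab10 colormap and scales them to 0-255 integer tuples. A standalone illustration of what that lookup yields (not part of the project code):

import matplotlib.pyplot as plt

colors = plt.cm.get_cmap('tab10', 3)
print(colors(0))  # RGBA floats in [0, 1], roughly (0.12, 0.47, 0.71, 1.0)
print(tuple(int(round(255 * c)) for c in colors(0)[:3]))  # (31, 119, 180)
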
Code Example #3
File: video.py  Project: simonbirt/frigate
    def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame):
        threading.Thread.__init__(self)
        self.name = name
        self.frame_shape = frame_shape
        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
        self.frame_queue = frame_queue
        self.take_frame = take_frame
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.plasma_client = PlasmaManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = 0
        self.last_frame = 0
        self.detection_frame = detection_frame
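
Several examples on this page meter throughput with an EventsPerSecond object through start(), update(), and eps(). Its implementation is not shown here; the following is a minimal sliding-window sketch consistent with those calls. The 10-second window and the list-based storage are assumptions.

import datetime


class EventsPerSecondSketch:
    """Hypothetical rate counter matching the start()/update()/eps() calls."""

    def __init__(self, window_seconds=10):
        self.window_seconds = window_seconds
        self.started_at = None
        self.timestamps = []

    def start(self):
        self.started_at = datetime.datetime.now().timestamp()

    def update(self):
        self.timestamps.append(datetime.datetime.now().timestamp())

    def eps(self):
        now = datetime.datetime.now().timestamp()
        # keep only events inside the sliding window
        self.timestamps = [t for t in self.timestamps
                           if t > now - self.window_seconds]
        # right after start(), divide by the elapsed time instead of the
        # full window so the rate is not underestimated
        elapsed = min(now - self.started_at, self.window_seconds)
        return len(self.timestamps) / max(elapsed, 1e-6)
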
Code Example #4
    def __init__(self, config, client, topic_prefix, tracked_objects_queue):
        threading.Thread.__init__(self)
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.camera_data = defaultdict(
            lambda: {
                'best_objects': {},
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')),
                'tracked_objects': {},
                'current_frame': np.zeros((720, 1280, 3), np.uint8),
                'current_frame_time': 0.0,
                'object_id': None
            })
        self.plasma_client = PlasmaManager()
Code Example #5
class TrackedObjectProcessor(threading.Thread):
    def __init__(self, camera_config, zone_config, client, topic_prefix,
                 tracked_objects_queue, event_queue, stop_event):
        threading.Thread.__init__(self)
        self.camera_config = camera_config
        self.zone_config = zone_config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.stop_event = stop_event
        self.camera_data = defaultdict(
            lambda: {
                'best_objects': {},
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'tracked_objects': {},
                'current_frame': np.zeros((720, 1280, 3), np.uint8),
                'current_frame_time': 0.0,
                'object_id': None
            })
        self.zone_data = defaultdict(
            lambda: {
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'contours': {}
            })

        # create zone contours
        for name, config in zone_config.items():
            for camera, camera_zone_config in config.items():
                coordinates = camera_zone_config['coordinates']
                if isinstance(coordinates, list):
                    self.zone_data[name]['contours'][camera] = np.array(
                        [[int(p.split(',')[0]),
                          int(p.split(',')[1])] for p in coordinates])
                elif isinstance(coordinates, str):
                    points = coordinates.split(',')
                    self.zone_data[name]['contours'][camera] = np.array(
                        [[int(points[i]), int(points[i + 1])]
                         for i in range(0, len(points), 2)])
                else:
                    print(
                        f"Unable to parse zone coordinates for {name} - {camera}"
                    )

        # set colors for zones
        colors = plt.cm.get_cmap('tab10', len(self.zone_data.keys()))
        for i, zone in enumerate(self.zone_data.values()):
            zone['color'] = tuple(int(round(255 * c)) for c in colors(i)[:3])

        self.plasma_client = PlasmaManager(self.stop_event)

    def get_best(self, camera, label):
        if label in self.camera_data[camera]['best_objects']:
            return self.camera_data[camera]['best_objects'][label]['frame']
        else:
            return None

    def get_current_frame(self, camera):
        return self.camera_data[camera]['current_frame']

    def run(self):
        while True:
            if self.stop_event.is_set():
                print(f"Exiting object processor...")
                break

            try:
                camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(
                    True, 10)
            except queue.Empty:
                continue

            camera_config = self.camera_config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            tracked_objects = self.camera_data[camera]['tracked_objects']

            current_ids = current_tracked_objects.keys()
            previous_ids = tracked_objects.keys()
            removed_ids = list(set(previous_ids).difference(current_ids))
            new_ids = list(set(current_ids).difference(previous_ids))
            updated_ids = list(set(current_ids).intersection(previous_ids))

            for id in new_ids:
                # only register the object here if we are sure it isn't a false positive
                if not filter_false_positives(current_tracked_objects[id]):
                    tracked_objects[id] = current_tracked_objects[id]
                    # publish events to mqtt
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/events/start",
                        json.dumps(tracked_objects[id]),
                        retain=False)
                    self.event_queue.put(
                        ('start', camera, tracked_objects[id]))

            for id in updated_ids:
                tracked_objects[id] = current_tracked_objects[id]

            for id in removed_ids:
                # publish events to mqtt
                tracked_objects[id]['end_time'] = frame_time
                self.client.publish(f"{self.topic_prefix}/{camera}/events/end",
                                    json.dumps(tracked_objects[id]),
                                    retain=False)
                self.event_queue.put(('end', camera, tracked_objects[id]))
                del tracked_objects[id]

            self.camera_data[camera]['current_frame_time'] = frame_time

            # build a dict of objects in each zone for current camera
            current_objects_in_zones = defaultdict(lambda: [])
            for obj in tracked_objects.values():
                bottom_center = (obj['centroid'][0], obj['box'][3])
                # check each zone
                for name, zone in self.zone_data.items():
                    current_contour = zone['contours'].get(camera, None)
                    # if the current camera does not have a contour for this zone, skip
                    if current_contour is None:
                        continue
                    # check if the object is in the zone and not filtered
                    if (cv2.pointPolygonTest(current_contour, bottom_center,
                                             False) >= 0
                            and not zone_filtered(
                                obj, self.zone_config[name][camera].get(
                                    'filters', {}))):
                        current_objects_in_zones[name].append(obj['label'])

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if current_frame is not plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if camera_config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                if camera_config['snapshots']['draw_zones']:
                    for name, zone in self.zone_data.items():
                        thickness = 2 if len(
                            current_objects_in_zones[name]) == 0 else 8
                        if camera in zone['contours']:
                            cv2.drawContours(current_frame,
                                             [zone['contours'][camera]], -1,
                                             zone['color'], thickness)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if self.camera_data[camera]['object_id'] is not None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                        # send updated snapshot over mqtt
                        best_frame = cv2.cvtColor(obj['frame'],
                                                  cv2.COLOR_RGB2BGR)
                        ret, jpg = cv2.imencode('.jpg', best_frame)
                        if ret:
                            jpg_bytes = jpg.tobytes()
                            self.client.publish(
                                f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot",
                                jpg_bytes,
                                retain=True)
                else:
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###

            # get the zones that are relevant for this camera
            relevant_zones = [
                zone for zone, config in self.zone_config.items()
                if camera in config
            ]
            for zone in relevant_zones:
                # create the set of labels in the current frame and previously reported
                labels_for_zone = set(
                    current_objects_in_zones[zone] +
                    list(self.zone_data[zone]['object_status'][camera].keys()))
                # for each label
                for label in labels_for_zone:
                    # compute the current 'ON' vs 'OFF' status by checking if any camera sees the object in the zone
                    previous_state = any([
                        c[label] == 'ON' for c in self.zone_data[zone]
                        ['object_status'].values()
                    ])
                    self.zone_data[zone]['object_status'][camera][
                        label] = 'ON' if label in current_objects_in_zones[
                            zone] else 'OFF'
                    new_state = any([
                        c[label] == 'ON' for c in self.zone_data[zone]
                        ['object_status'].values()
                    ])
                    # if the value is changing, send over MQTT
                    if not previous_state and new_state:
                        self.client.publish(
                            f"{self.topic_prefix}/{zone}/{label}",
                            'ON',
                            retain=False)
                    elif previous_state and not new_state:
                        self.client.publish(
                            f"{self.topic_prefix}/{zone}/{label}",
                            'OFF',
                            retain=False)

            # count objects by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and obj_name not in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)
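
The zone membership check in the example above relies on cv2.pointPolygonTest with measureDist=False, which returns +1.0 for a point inside the contour, 0.0 on the edge, and -1.0 outside, so the >= 0 comparison counts edge contact as inside. A quick standalone check:

import cv2
import numpy as np

contour = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=np.int32)
print(cv2.pointPolygonTest(contour, (50, 50), False))   # 1.0  (inside)
print(cv2.pointPolygonTest(contour, (0, 50), False))    # 0.0  (on the edge)
print(cv2.pointPolygonTest(contour, (150, 50), False))  # -1.0 (outside)
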
Code Example #6
class TrackedObjectProcessor(threading.Thread):
    def __init__(self, config, client, topic_prefix, tracked_objects_queue,
                 event_queue):
        threading.Thread.__init__(self)
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.event_queue = event_queue
        self.camera_data = defaultdict(
            lambda: {
                'best_objects': {},
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'tracked_objects': {},
                'current_frame': np.zeros((720, 1280, 3), np.uint8),
                'current_frame_time': 0.0,
                'object_id': None
            })
        self.plasma_client = PlasmaManager()
        self.room_tracker = None
        self.room_tracker_mqtt_state = {}

    def get_best(self, camera, label):
        if label in self.camera_data[camera]['best_objects']:
            return self.camera_data[camera]['best_objects'][label]['frame']
        else:
            return None

    def get_current_frame(self, camera):
        return self.camera_data[camera]['current_frame']

    def run(self):
        while True:
            camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(
            )

            config = self.config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            tracked_objects = self.camera_data[camera]['tracked_objects']

            current_ids = current_tracked_objects.keys()
            previous_ids = tracked_objects.keys()
            removed_ids = list(set(previous_ids).difference(current_ids))
            new_ids = list(set(current_ids).difference(previous_ids))
            updated_ids = list(set(current_ids).intersection(previous_ids))

            for id in new_ids:
                tracked_objects[id] = current_tracked_objects[id]
                # publish events to mqtt
                self.client.publish(
                    f"{self.topic_prefix}/{camera}/events/start",
                    json.dumps(tracked_objects[id]),
                    retain=False)
                self.event_queue.put(('start', camera, tracked_objects[id]))

            for id in updated_ids:
                tracked_objects[id] = current_tracked_objects[id]

            for id in removed_ids:
                # publish events to mqtt
                tracked_objects[id]['end_time'] = frame_time
                self.client.publish(f"{self.topic_prefix}/{camera}/events/end",
                                    json.dumps(tracked_objects[id]),
                                    retain=False)
                self.event_queue.put(('end', camera, tracked_objects[id]))
                del tracked_objects[id]

            self.camera_data[camera]['current_frame_time'] = frame_time

            ###
            # Update room tracker if enabled
            ###
            room_tracker_conf = config.get("room_tracker", None)
            if room_tracker_conf is not None and room_tracker_conf.get(
                    "enabled", False):
                if self.room_tracker is None:
                    self.room_tracker = RoomTracker(room_tracker_conf)
                self.room_tracker.on_change(frame_time, tracked_objects)

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if current_frame is not plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                # Draw room tracker area points
                if self.room_tracker is not None:
                    for room_name, c in self.room_tracker.rooms_conf.items():
                        p = (c["point_x"], c["point_y"])
                        cv2.rectangle(current_frame, (p[0] - 10, p[1] - 10),
                                      (p[0] + 10, p[1] + 10), (255, 0, 0), 3)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if self.camera_data[camera]['object_id'] is not None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                        # send updated snapshot over mqtt
                        best_frame = cv2.cvtColor(obj['frame'],
                                                  cv2.COLOR_RGB2BGR)
                        ret, jpg = cv2.imencode('.jpg', best_frame)
                        if ret:
                            jpg_bytes = jpg.tobytes()
                            self.client.publish(
                                f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot",
                                jpg_bytes,
                                retain=True)
                else:
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###
            # count objects by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and obj_name not in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)

            # report area tracking
            if self.room_tracker is not None:
                for room_name, _ in self.room_tracker.rooms_conf.items():
                    ppl_count = self.room_tracker.get_area_count(room_name)
                    status = "ON" if ppl_count > 0 else "OFF"
                    timestamp = self.room_tracker.get_latest_change_timestamp(
                        room_name)
                    r = {
                        "status": status,
                        "count": ppl_count,
                        "timestamp": timestamp,
                    }
                    if self.room_tracker_mqtt_state.get(room_name) == r:
                        continue
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/area/{room_name}",
                        json.dumps(r),
                        retain=False)
                    self.room_tracker_mqtt_state[room_name] = r
Code Example #7
def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
                 detection_queue, detected_objects_queue, fps, detection_fps,
                 read_start, detection_frame):
    print(f"Starting process for {name}: {os.getpid()}")
    listen()

    detection_frame.value = 0.0

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(),
                                      camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {
            **global_object_filters.get(obj, {}),
            **camera_object_filters.get(obj, {})
        }

    frame = np.zeros(frame_shape, np.uint8)

    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']),
                          cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None:
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue)

    camera_tracker_config = config.get('tracker', {
        "min_hits": 1,
        "max_age": 5,
        "iou_threshold": 0.2
    })
    object_tracker = ObjectTracker(camera_tracker_config["min_hits"],
                                   camera_tracker_config["max_age"],
                                   camera_tracker_config["iou_threshold"])

    plasma_client = PlasmaManager()
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    fps_tracker.start()
    object_detector.fps.start()
    while True:
        read_start.value = datetime.datetime.now().timestamp()
        frame_time = frame_queue.get()
        duration = datetime.datetime.now().timestamp() - read_start.value
        read_start.value = 0.0
        avg_wait = (avg_wait * 99 + duration) / 100
        detection_frame.value = frame_time

        # Get frame from plasma store
        frame = plasma_client.get(f"{name}{frame_time}")

        if frame is plasma.ObjectNotAvailable:
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if intersection_over_union(motion_box, obj['box']) > .2:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(
            0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [
            calculate_region(frame_shape, motion_boxes[i][0],
                             motion_boxes[i][1], motion_boxes[i][2],
                             motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes
        ]

        # compute tracked object regions
        object_regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest
        ]

        # merge regions with high IOU
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a],
                                              merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(
                    calculate_region(frame_shape, min(a[0], b[0]),
                                     min(a[1], b[1]), max(a[2], b[2]),
                                     max(a[3], b[3]), 1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break

        # resize regions and detect
        detections = []
        for region in merged_regions:

            tensor_input = create_tensor_input(frame, region)

            region_detections = object_detector.detect(tensor_input)

            for d in region_detections:
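                # each detection d is (label, score, box); the box is
                # normalized to the square region, ordered (y_min, x_min,
                # y_max, x_max), so scale by the region size and add the
                # region's origin to get frame coordinates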
                box = d[2]
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                       (x_max - x_min) * (y_max - y_min), region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)

        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(
                            tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                                   (x_max - x_min) * (y_max - y_min), region)
                            if filtered(det, objects_to_track, object_filters,
                                        mask):
                                continue
                            selected_objects.append(det)

                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue
        detected_objects_queue.put(
            (name, frame_time, object_tracker.tracked_objects))

    print(f"{name}: exiting subprocess")
Code Example #8
class TrackedObjectProcessor(threading.Thread):
    def __init__(self, config, client, topic_prefix, tracked_objects_queue):
        threading.Thread.__init__(self)
        self.config = config
        self.client = client
        self.topic_prefix = topic_prefix
        self.tracked_objects_queue = tracked_objects_queue
        self.camera_data = defaultdict(
            lambda: {
                'best_objects': {},
                'object_status': defaultdict(lambda: defaultdict(lambda: 'OFF')
                                             ),
                'tracked_objects': {},
                'current_frame': np.zeros((720, 1280, 3), np.uint8),
                'current_frame_time': 0.0,
                'object_id': None
            })
        self.plasma_client = PlasmaManager()

    def get_best(self, camera, label):
        if label in self.camera_data[camera]['best_objects']:
            return self.camera_data[camera]['best_objects'][label]['frame']
        else:
            return None

    def get_current_frame(self, camera):
        return self.camera_data[camera]['current_frame']

    def run(self):
        while True:
            camera, frame_time, tracked_objects = self.tracked_objects_queue.get(
            )

            config = self.config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            self.camera_data[camera]['tracked_objects'] = tracked_objects
            self.camera_data[camera]['current_frame_time'] = frame_time

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if current_frame is not plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if self.camera_data[camera]['object_id'] is not None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                else:
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###
            # count objects with at least 2 entries in their history, by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                if len(obj['history']) > 1:
                    obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and obj_name not in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)
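
Examples #5, #6, and #8 all call a draw_box_with_label helper that is not listed on this page. The following is a minimal sketch consistent with the call sites; the default color, font, and text placement are assumptions rather than the project's actual implementation.

import cv2


def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info,
                        thickness=2, color=(0, 0, 255)):
    # bounding box
    cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
    # label plus score/area text just above the box
    cv2.putText(frame, f"{label} {info}", (x_min, max(y_min - 5, 10)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)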