Example #1
0
    def get_current_frame(self, draw=False):
        """Return a BGR copy of the most recent frame, optionally annotated.

        Snapshots the frame, its timestamp and the tracked objects under the
        lock, converts YUV (I420) to BGR, and — when *draw* is true — overlays
        object boxes, detection regions, the timestamp and zone contours.
        """
        with self.current_frame_lock:
            frame = np.copy(self._current_frame)
            snapshot_time = self.current_frame_time
            objects = copy.deepcopy(self.tracked_objects)

        frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)

        if not draw:
            return frame

        for obj in objects.values():
            # objects last seen on an earlier frame are drawn thin and blue
            if obj['frame_time'] == snapshot_time:
                line_width = 2
                line_color = COLOR_MAP[obj['label']]
            else:
                line_width = 1
                line_color = (255,0,0)

            # bounding box with label/score/area annotation
            box = obj['box']
            draw_box_with_label(frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=line_width, color=line_color)
            # detection region outline in green
            reg = obj['region']
            cv2.rectangle(frame, (reg[0], reg[1]), (reg[2], reg[3]), (0,255,0), 1)

        if self.config['snapshots']['show_timestamp']:
            stamp = datetime.datetime.fromtimestamp(snapshot_time).strftime("%m/%d/%Y %H:%M:%S")
            cv2.putText(frame, stamp, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)

        if self.config['snapshots']['draw_zones']:
            for zone_name, zone in self.config['zones'].items():
                # zones currently containing an object get a heavier outline
                occupied = any(zone_name in obj['zones'] for obj in objects.values())
                cv2.drawContours(frame, [zone['contour']], -1, zone['color'], 8 if occupied else 2)

        return frame
Example #2
0
        def snapshot(camera, obj):
            """Publish a retained JPEG snapshot of *obj* for *camera* over MQTT.

            Converts the stored YUV frame to BGR, optionally draws the
            bounding box, crops/resizes according to the camera's mqtt
            config, stamps a timestamp, then JPEG-encodes and publishes.
            """
            # nothing to publish if no frame was captured for this object
            if 'frame' not in obj:
                return

            snapshot_config = self.camera_config[camera]['snapshots']
            best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
            if snapshot_config['draw_bounding_boxes']:
                thickness = 2
                color = COLOR_MAP[obj['label']]
                box = obj['box']
                draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)

            mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
            if mqtt_config.get('crop_to_region'):
                region = obj['region']
                best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
            if 'snapshot_height' in mqtt_config:
                # resize to the requested height, preserving aspect ratio
                height = int(mqtt_config['snapshot_height'])
                width = int(height*best_frame.shape[1]/best_frame.shape[0])
                best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

            if snapshot_config['show_timestamp']:
                time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
                size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
                text_width = size[0][0]
                # scale the font so the text spans roughly a third of the
                # frame width (but never less than 200px worth)
                desired_size = max(200, 0.33*best_frame.shape[1])
                font_scale = desired_size/text_width
                cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, color=(255, 255, 255), thickness=2)

            ret, jpg = cv2.imencode('.jpg', best_frame)
            if ret:
                jpg_bytes = jpg.tobytes()
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot", jpg_bytes, retain=True)
Example #3
0
    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        """Write an annotated debug JPEG for *frame_time* into *debug_path*.

        Draws each tracked object's bounding box (thin blue when the object
        was last seen on an earlier frame, yellow otherwise) and its
        detection region, then saves the frame named after the clip and the
        microsecond frame time.
        """
        current_frame = cv2.cvtColor(
            self.frame_manager.get(f"{self.camera_name}{frame_time}",
                                   self.camera_config.frame_shape_yuv),
            cv2.COLOR_YUV2BGR_I420,
        )
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            # fix: the original initialized color to (0, 0, 175) but both
            # branches below overwrite it, so the dead assignment is removed
            if obj["frame_time"] != frame_time:
                # object not seen on this exact frame: thin blue box
                thickness = 1
                color = (255, 0, 0)
            else:
                thickness = 2
                color = (255, 255, 0)

            # draw the bounding boxes on the frame
            box = obj["box"]
            draw_box_with_label(
                current_frame,
                box[0],
                box[1],
                box[2],
                box[3],
                obj["id"],
                f"{int(obj['score']*100)}% {int(obj['area'])}",
                thickness=thickness,
                color=color,
            )
            # draw the regions on the frame
            region = obj["region"]
            draw_box_with_label(
                current_frame,
                region[0],
                region[1],
                region[2],
                region[3],
                "region",
                "",
                thickness=1,
                color=(0, 255, 0),
            )

        cv2.imwrite(
            f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
            current_frame,
        )
Example #4
0
    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        """Write an annotated debug JPEG for *frame_time* into *debug_path*.

        Draws each tracked object's bounding box (thin blue when the object
        was last seen on an earlier frame, yellow otherwise) plus its
        detection region, then writes the frame converted RGB->BGR.
        """
        current_frame = self.frame_manager.get(
            f"{self.camera_name}{frame_time}", self.frame_shape)
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            # fix: the original initialized color to (0, 0, 175) but both
            # branches below overwrite it, so the dead assignment is removed
            if obj['frame_time'] != frame_time:
                # object not seen on this exact frame: thin blue box
                thickness = 1
                color = (255, 0, 0)
            else:
                thickness = 2
                color = (255, 255, 0)

            # draw the bounding boxes on the frame
            box = obj['box']
            draw_box_with_label(current_frame,
                                box[0],
                                box[1],
                                box[2],
                                box[3],
                                obj['label'],
                                f"{int(obj['score']*100)}% {int(obj['area'])}",
                                thickness=thickness,
                                color=color)
            # draw the regions on the frame
            region = obj['region']
            draw_box_with_label(current_frame,
                                region[0],
                                region[1],
                                region[2],
                                region[3],
                                'region',
                                "",
                                thickness=1,
                                color=(0, 255, 0))

        cv2.imwrite(
            f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
            cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
Example #5
0
    def update(self, frame_time, tracked_objects):
        """Ingest the tracked objects detected for *frame_time*.

        Swaps in the new frame, diffs the incoming object ids against the
        current set (start/update/end callbacks), maintains score history
        and false-positive state, computes zone membership, draws overlays
        on the frame, keeps per-label "best" snapshots and reports per-label
        ON/OFF status through the registered callbacks.
        """
        self.current_frame_time = frame_time
        # get the new frame and delete the old frame
        frame_id = f"{self.name}{frame_time}"
        self.current_frame = self.frame_manager.get(frame_id)
        if not self.previous_frame_id is None:
            self.frame_manager.delete(self.previous_frame_id)
        self.previous_frame_id = frame_id

        # diff incoming ids against what is already being tracked
        current_ids = tracked_objects.keys()
        previous_ids = self.tracked_objects.keys()
        removed_ids = list(set(previous_ids).difference(current_ids))
        new_ids = list(set(current_ids).difference(previous_ids))
        updated_ids = list(set(current_ids).intersection(previous_ids))

        for id in new_ids:
            self.tracked_objects[id] = tracked_objects[id]
            self.tracked_objects[id]['zones'] = []

            # start the score history
            self.tracked_objects[id]['score_history'] = [
                self.tracked_objects[id]['score']
            ]

            # calculate if this is a false positive
            self.tracked_objects[id]['computed_score'] = self.compute_score(
                self.tracked_objects[id])
            self.tracked_objects[id]['false_positive'] = self.false_positive(
                self.tracked_objects[id])

            # call event handlers
            for c in self.callbacks['start']:
                c(self.name, tracked_objects[id])

        for id in updated_ids:
            self.tracked_objects[id].update(tracked_objects[id])

            # if the object is not in the current frame, add a 0.0 to the score history
            if self.tracked_objects[id][
                    'frame_time'] != self.current_frame_time:
                self.tracked_objects[id]['score_history'].append(0.0)
            else:
                self.tracked_objects[id]['score_history'].append(
                    self.tracked_objects[id]['score'])
            # only keep the last 10 scores
            if len(self.tracked_objects[id]['score_history']) > 10:
                self.tracked_objects[id][
                    'score_history'] = self.tracked_objects[id][
                        'score_history'][-10:]

            # calculate if this is a false positive
            self.tracked_objects[id]['computed_score'] = self.compute_score(
                self.tracked_objects[id])
            self.tracked_objects[id]['false_positive'] = self.false_positive(
                self.tracked_objects[id])

            # call event handlers
            for c in self.callbacks['update']:
                c(self.name, self.tracked_objects[id])

        for id in removed_ids:
            # publish events to mqtt
            self.tracked_objects[id]['end_time'] = frame_time
            for c in self.callbacks['end']:
                c(self.name, self.tracked_objects[id])
            del self.tracked_objects[id]

        # check to see if the objects are in any zones
        for obj in self.tracked_objects.values():
            current_zones = []
            # zone membership is tested at the bottom-center of the box
            bottom_center = (obj['centroid'][0], obj['box'][3])
            # check each zone
            for name, zone in self.config['zones'].items():
                contour = zone['contour']
                # check if the object is in the zone and not filtered
                if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0
                        and not zone_filtered(obj, zone.get('filters', {}))):
                    current_zones.append(name)
            obj['zones'] = current_zones

        # draw on the frame
        if not self.current_frame is None:
            # draw the bounding boxes on the frame
            for obj in self.tracked_objects.values():
                thickness = 2
                color = COLOR_MAP[obj['label']]

                # objects last seen on an earlier frame are drawn thin and blue
                if obj['frame_time'] != frame_time:
                    thickness = 1
                    color = (255, 0, 0)

                # draw the bounding boxes on the frame
                box = obj['box']
                draw_box_with_label(
                    self.current_frame,
                    box[0],
                    box[1],
                    box[2],
                    box[3],
                    obj['label'],
                    f"{int(obj['score']*100)}% {int(obj['area'])}",
                    thickness=thickness,
                    color=color)
                # draw the regions on the frame
                region = obj['region']
                cv2.rectangle(self.current_frame, (region[0], region[1]),
                              (region[2], region[3]), (0, 255, 0), 1)

            if self.config['snapshots']['show_timestamp']:
                time_to_show = datetime.datetime.fromtimestamp(
                    frame_time).strftime("%m/%d/%Y %H:%M:%S")
                cv2.putText(self.current_frame,
                            time_to_show, (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=.8,
                            color=(255, 255, 255),
                            thickness=2)

            if self.config['snapshots']['draw_zones']:
                for name, zone in self.config['zones'].items():
                    # zones currently containing an object get a heavier outline
                    thickness = 8 if any([
                        name in obj['zones']
                        for obj in self.tracked_objects.values()
                    ]) else 2
                    cv2.drawContours(self.current_frame, [zone['contour']], -1,
                                     zone['color'], thickness)

        # maintain best objects
        for obj in self.tracked_objects.values():
            object_type = obj['label']
            # if the object wasn't seen on the current frame, skip it
            if obj['frame_time'] != self.current_frame_time or obj[
                    'false_positive']:
                continue
            obj_copy = copy.deepcopy(obj)
            if object_type in self.best_objects:
                current_best = self.best_objects[object_type]
                now = datetime.datetime.now().timestamp()
                # if the object is a higher score than the current best score
                # or the current object is older than desired, use the new object
                if obj_copy['score'] > current_best['score'] or (
                        now - current_best['frame_time']) > self.config.get(
                            'best_image_timeout', 60):
                    obj_copy['frame'] = np.copy(self.current_frame)
                    self.best_objects[object_type] = obj_copy
                    for c in self.callbacks['snapshot']:
                        c(self.name, self.best_objects[object_type])
            else:
                # first sighting of this label becomes the initial best
                obj_copy['frame'] = np.copy(self.current_frame)
                self.best_objects[object_type] = obj_copy
                for c in self.callbacks['snapshot']:
                    c(self.name, self.best_objects[object_type])

        # update overall camera state for each object type
        obj_counter = Counter()
        for obj in self.tracked_objects.values():
            if not obj['false_positive']:
                obj_counter[obj['label']] += 1

        # report on detected objects
        for obj_name, count in obj_counter.items():
            new_status = 'ON' if count > 0 else 'OFF'
            if new_status != self.object_status[obj_name]:
                self.object_status[obj_name] = new_status
                for c in self.callbacks['object_status']:
                    c(self.name, obj_name, new_status)

        # expire any objects that are ON and no longer detected
        expired_objects = [
            obj_name for obj_name, status in self.object_status.items()
            if status == 'ON' and not obj_name in obj_counter
        ]
        for obj_name in expired_objects:
            self.object_status[obj_name] = 'OFF'
            for c in self.callbacks['object_status']:
                c(self.name, obj_name, 'OFF')
            # push a final snapshot for the expired label
            for c in self.callbacks['snapshot']:
                c(self.name, self.best_objects[obj_name])
Example #6
0
    def run(self):
        """Consume tracked-object updates from the queue until stopped.

        For each (camera, frame_time, objects) tuple: diffs object ids and
        publishes start/end events, computes zone membership, annotates the
        frame from the plasma store, maintains per-label "best" snapshots
        and reports zone and per-camera object ON/OFF state over MQTT.
        """
        while True:
            if self.stop_event.is_set():
                print(f"Exiting object processor...")
                break

            try:
                camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(
                    True, 10)
            except queue.Empty:
                # timed out waiting for work; loop back to re-check stop_event
                continue

            camera_config = self.camera_config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            tracked_objects = self.camera_data[camera]['tracked_objects']

            # diff incoming ids against the camera's tracked set
            current_ids = current_tracked_objects.keys()
            previous_ids = tracked_objects.keys()
            removed_ids = list(set(previous_ids).difference(current_ids))
            new_ids = list(set(current_ids).difference(previous_ids))
            updated_ids = list(set(current_ids).intersection(previous_ids))

            for id in new_ids:
                # only register the object here if we are sure it isnt a false positive
                if not filter_false_positives(current_tracked_objects[id]):
                    tracked_objects[id] = current_tracked_objects[id]
                    # publish events to mqtt
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/events/start",
                        json.dumps(tracked_objects[id]),
                        retain=False)
                    self.event_queue.put(
                        ('start', camera, tracked_objects[id]))

            for id in updated_ids:
                tracked_objects[id] = current_tracked_objects[id]

            for id in removed_ids:
                # publish events to mqtt
                tracked_objects[id]['end_time'] = frame_time
                self.client.publish(f"{self.topic_prefix}/{camera}/events/end",
                                    json.dumps(tracked_objects[id]),
                                    retain=False)
                self.event_queue.put(('end', camera, tracked_objects[id]))
                del tracked_objects[id]

            self.camera_data[camera]['current_frame_time'] = frame_time

            # build a dict of objects in each zone for current camera
            current_objects_in_zones = defaultdict(lambda: [])
            for obj in tracked_objects.values():
                # zone membership is tested at the bottom-center of the box
                bottom_center = (obj['centroid'][0], obj['box'][3])
                # check each zone
                for name, zone in self.zone_data.items():
                    current_contour = zone['contours'].get(camera, None)
                    # if the current camera does not have a contour for this zone, skip
                    if current_contour is None:
                        continue
                    # check if the object is in the zone and not filtered
                    if (cv2.pointPolygonTest(current_contour, bottom_center,
                                             False) >= 0
                            and not zone_filtered(
                                obj, self.zone_config[name][camera].get(
                                    'filters', {}))):
                        current_objects_in_zones[name].append(obj['label'])

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if not current_frame is plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    # objects last seen on an earlier frame are drawn thin and blue
                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if camera_config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                if camera_config['snapshots']['draw_zones']:
                    for name, zone in self.zone_data.items():
                        # zones currently containing an object get a heavier outline
                        thickness = 2 if len(
                            current_objects_in_zones[name]) == 0 else 8
                        if camera in zone['contours']:
                            cv2.drawContours(current_frame,
                                             [zone['contours'][camera]], -1,
                                             zone['color'], thickness)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if not self.camera_data[camera]['object_id'] is None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                        # send updated snapshot over mqtt
                        best_frame = cv2.cvtColor(obj['frame'],
                                                  cv2.COLOR_RGB2BGR)
                        ret, jpg = cv2.imencode('.jpg', best_frame)
                        if ret:
                            jpg_bytes = jpg.tobytes()
                            self.client.publish(
                                f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot",
                                jpg_bytes,
                                retain=True)
                else:
                    # first sighting of this label becomes the initial best
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###

            # get the zones that are relevant for this camera
            relevant_zones = [
                zone for zone, config in self.zone_config.items()
                if camera in config
            ]
            for zone in relevant_zones:
                # create the set of labels in the current frame and previously reported
                labels_for_zone = set(
                    current_objects_in_zones[zone] +
                    list(self.zone_data[zone]['object_status'][camera].keys()))
                # for each label
                for label in labels_for_zone:
                    # compute the current 'ON' vs 'OFF' status by checking if any camera sees the object in the zone
                    previous_state = any([
                        c[label] == 'ON' for c in self.zone_data[zone]
                        ['object_status'].values()
                    ])
                    self.zone_data[zone]['object_status'][camera][
                        label] = 'ON' if label in current_objects_in_zones[
                            zone] else 'OFF'
                    new_state = any([
                        c[label] == 'ON' for c in self.zone_data[zone]
                        ['object_status'].values()
                    ])
                    # if the value is changing, send over MQTT
                    if previous_state == False and new_state == True:
                        self.client.publish(
                            f"{self.topic_prefix}/{zone}/{label}",
                            'ON',
                            retain=False)
                    elif previous_state == True and new_state == False:
                        self.client.publish(
                            f"{self.topic_prefix}/{zone}/{label}",
                            'OFF',
                            retain=False)

            # count  by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and not obj_name in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)
    def run(self):
        """Consume tracked-object updates and publish state over MQTT.

        Variant without a stop event: blocks on the queue forever. Looks up
        the frame in the plasma store by a sha1-derived object id, annotates
        it, maintains per-label "best" snapshots and reports per-camera
        object ON/OFF status and snapshots over MQTT.
        """
        while True:
            camera, frame_time, tracked_objects = self.tracked_objects_queue.get(
            )
            debug("{} {} {}".format(camera, frame_time, tracked_objects))
            # debug(self.config)
            # NOTE: per-camera config lookup removed by a previous author
            # config = self.config[camera]

            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            self.camera_data[camera]['tracked_objects'] = tracked_objects

            ###
            # Draw tracked objects on the frame
            ###
            # plasma object ids are derived from sha1("<camera><frame_time>")
            object_id_hash = hashlib.sha1(str.encode(f"{camera}{frame_time}"))
            object_id_bytes = object_id_hash.digest()
            object_id = plasma.ObjectID(object_id_bytes)
            current_frame = self.plasma_client.get(object_id, timeout_ms=0)

            if not current_frame is plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    # objects last seen on an earlier frame are drawn thin and blue
                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                # NOTE: timestamp overlay disabled by a previous author

                # if config['snapshots']['show_timestamp']:
                #     time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
                #     cv2.putText(current_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)

                ###
                # Set the current frame as ready
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # store the object id, so you can delete it at the next loop
                previous_object_id = self.camera_data[camera]['object_id']
                if not previous_object_id is None:
                    self.plasma_client.delete([previous_object_id])
                self.camera_data[camera]['object_id'] = object_id

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                else:
                    # first sighting of this label becomes the initial best
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###
            # count objects with more than 2 entries in history by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                if len(obj['history']) > 1:
                    obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and not obj_name in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)
Beispiel #8
0
    def get_current_frame(self, draw_options=None):
        """Return a BGR copy of the latest camera frame, optionally annotated.

        Args:
            draw_options: optional dict of boolean flags selecting overlays:
                "bounding_boxes", "regions", "zones", "mask", "motion_boxes",
                "timestamp". Defaults to no overlays.

        Returns:
            A numpy BGR image. The caller owns the copy; the internally
            stored frame is never mutated.
        """
        # Avoid the mutable-default-argument pitfall; an empty dict means
        # "draw nothing extra".
        if draw_options is None:
            draw_options = {}

        # Snapshot all shared state under the lock so drawing operates on a
        # consistent view without holding the lock during cv2 work.
        with self.current_frame_lock:
            frame_copy = np.copy(self._current_frame)
            frame_time = self.current_frame_time
            tracked_objects = {
                k: v.to_dict()
                for k, v in self.tracked_objects.items()
            }
            motion_boxes = self.motion_boxes.copy()
            regions = self.regions.copy()

        # Frames are stored as YUV I420 internally; convert once for drawing.
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
        # draw on the frame
        if draw_options.get("bounding_boxes"):
            # draw the bounding boxes on the frame
            for obj in tracked_objects.values():
                if obj["frame_time"] == frame_time:
                    thickness = 2
                    color = COLOR_MAP[obj["label"]]
                else:
                    # object not matched on this exact frame: thin blue box
                    thickness = 1
                    color = (255, 0, 0)

                # draw the bounding boxes on the frame
                box = obj["box"]
                draw_box_with_label(
                    frame_copy,
                    box[0],
                    box[1],
                    box[2],
                    box[3],
                    obj["label"],
                    f"{obj['score']:.0%} {int(obj['area'])}",
                    thickness=thickness,
                    color=color,
                )

        if draw_options.get("regions"):
            # green rectangles for the detection regions
            for region in regions:
                cv2.rectangle(
                    frame_copy,
                    (region[0], region[1]),
                    (region[2], region[3]),
                    (0, 255, 0),
                    2,
                )

        if draw_options.get("zones"):
            # zones with an object currently inside are drawn thicker
            for name, zone in self.camera_config.zones.items():
                thickness = (8 if any(
                    name in obj["current_zones"]
                    for obj in tracked_objects.values()) else 2)
                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color,
                                 thickness)

        if draw_options.get("mask"):
            # black out pixels excluded by the motion mask
            mask_overlay = np.where(self.camera_config.motion.mask == [0])
            frame_copy[mask_overlay] = [0, 0, 0]

        if draw_options.get("motion_boxes"):
            # red rectangles for raw motion detections
            for m_box in motion_boxes:
                cv2.rectangle(
                    frame_copy,
                    (m_box[0], m_box[1]),
                    (m_box[2], m_box[3]),
                    (0, 0, 255),
                    2,
                )

        if draw_options.get("timestamp"):
            time_to_show = datetime.datetime.fromtimestamp(
                frame_time).strftime("%m/%d/%Y %H:%M:%S")
            cv2.putText(
                frame_copy,
                time_to_show,
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=0.8,
                color=(255, 255, 255),
                thickness=2,
            )

        return frame_copy
Beispiel #9
0
    def get_jpg_bytes(self,
                      timestamp=False,
                      bounding_box=False,
                      crop=False,
                      height=None,
                      quality=70):
        """Render this object's thumbnail frame as JPEG bytes.

        Args:
            timestamp: overlay the frame's capture time, scaled to roughly
                a third of the image width.
            bounding_box: draw the detection box with label/score/area.
            crop: crop to a 1.1x padded region around the detection box.
            height: if set, resize to this height preserving aspect ratio.
            quality: JPEG quality (0-100); previously hard-coded to 70,
                kept as the default for backward compatibility.

        Returns:
            JPEG-encoded bytes, or None when no thumbnail exists, the frame
            was evicted from the cache, or encoding fails.
        """
        if self.thumbnail_data is None:
            return None

        try:
            # frames are cached in YUV I420; convert for drawing/encoding
            best_frame = cv2.cvtColor(
                self.frame_cache[self.thumbnail_data["frame_time"]],
                cv2.COLOR_YUV2BGR_I420,
            )
        except KeyError:
            logger.warning(
                f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
            )
            return None

        if bounding_box:
            thickness = 2
            color = COLOR_MAP[self.obj_data["label"]]

            # draw the bounding boxes on the frame
            box = self.thumbnail_data["box"]
            draw_box_with_label(
                best_frame,
                box[0],
                box[1],
                box[2],
                box[3],
                self.obj_data["label"],
                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
                thickness=thickness,
                color=color,
            )

        if crop:
            box = self.thumbnail_data["box"]
            # pad the box by 10% before cropping
            region = calculate_region(best_frame.shape, box[0], box[1], box[2],
                                      box[3], 1.1)
            best_frame = best_frame[region[1]:region[3], region[0]:region[2]]

        if height:
            width = int(height * best_frame.shape[1] / best_frame.shape[0])
            best_frame = cv2.resize(best_frame,
                                    dsize=(width, height),
                                    interpolation=cv2.INTER_AREA)

        if timestamp:
            time_to_show = datetime.datetime.fromtimestamp(
                self.thumbnail_data["frame_time"]).strftime(
                    "%m/%d/%Y %H:%M:%S")
            # scale the font so the text spans ~33% of the image width
            # (with a 150px floor for small thumbnails)
            size = cv2.getTextSize(time_to_show,
                                   cv2.FONT_HERSHEY_SIMPLEX,
                                   fontScale=1,
                                   thickness=2)
            text_width = size[0][0]
            desired_size = max(150, 0.33 * best_frame.shape[1])
            font_scale = desired_size / text_width
            cv2.putText(
                best_frame,
                time_to_show,
                (5, best_frame.shape[0] - 7),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=font_scale,
                color=(255, 255, 255),
                thickness=2,
            )

        ret, jpg = cv2.imencode(".jpg", best_frame,
                                [int(cv2.IMWRITE_JPEG_QUALITY), quality])
        if ret:
            return jpg.tobytes()
        else:
            return None
    def run(self):
        """Consume tracked-object updates and publish state over MQTT.

        Runs forever; each iteration handles one (camera, frame_time,
        tracked_objects) tuple from the queue:
          * diffs incoming object ids against the previous set, emitting
            start/end events to MQTT and the event queue
          * optionally updates the room tracker
          * draws boxes/regions/timestamp on the frame from the plasma store
          * maintains the best-scoring recent frame per label and publishes
            snapshots and ON/OFF state per object type
          * reports per-room occupancy when room tracking is enabled
        """
        while True:
            camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(
            )

            config = self.config[camera]
            best_objects = self.camera_data[camera]['best_objects']
            current_object_status = self.camera_data[camera]['object_status']
            tracked_objects = self.camera_data[camera]['tracked_objects']

            # diff the incoming ids against what we were already tracking
            current_ids = current_tracked_objects.keys()
            previous_ids = tracked_objects.keys()
            removed_ids = list(set(previous_ids).difference(current_ids))
            new_ids = list(set(current_ids).difference(previous_ids))
            updated_ids = list(set(current_ids).intersection(previous_ids))

            # NOTE: loop variable renamed from `id` to avoid shadowing the builtin
            for obj_id in new_ids:
                tracked_objects[obj_id] = current_tracked_objects[obj_id]
                # publish events to mqtt
                self.client.publish(
                    f"{self.topic_prefix}/{camera}/events/start",
                    json.dumps(tracked_objects[obj_id]),
                    retain=False)
                self.event_queue.put(
                    ('start', camera, tracked_objects[obj_id]))

            for obj_id in updated_ids:
                tracked_objects[obj_id] = current_tracked_objects[obj_id]

            for obj_id in removed_ids:
                # publish events to mqtt
                tracked_objects[obj_id]['end_time'] = frame_time
                self.client.publish(f"{self.topic_prefix}/{camera}/events/end",
                                    json.dumps(tracked_objects[obj_id]),
                                    retain=False)
                self.event_queue.put(('end', camera, tracked_objects[obj_id]))
                del tracked_objects[obj_id]

            self.camera_data[camera]['current_frame_time'] = frame_time

            ###
            # Update room tracker if enabled
            ###
            room_tracker_conf = config.get("room_tracker", None)
            if room_tracker_conf is not None and room_tracker_conf.get(
                    "enabled", False):
                # lazily construct the tracker on first use
                if self.room_tracker is None:
                    self.room_tracker = RoomTracker(room_tracker_conf)
                self.room_tracker.on_change(frame_time, tracked_objects)

            ###
            # Draw tracked objects on the frame
            ###
            current_frame = self.plasma_client.get(f"{camera}{frame_time}")

            if current_frame is not plasma.ObjectNotAvailable:
                # draw the bounding boxes on the frame
                for obj in tracked_objects.values():
                    thickness = 2
                    color = COLOR_MAP[obj['label']]

                    # objects not matched on this frame get a thin blue box
                    if obj['frame_time'] != frame_time:
                        thickness = 1
                        color = (255, 0, 0)

                    # draw the bounding boxes on the frame
                    box = obj['box']
                    draw_box_with_label(
                        current_frame,
                        box[0],
                        box[1],
                        box[2],
                        box[3],
                        obj['label'],
                        f"{int(obj['score']*100)}% {int(obj['area'])}",
                        thickness=thickness,
                        color=color)
                    # draw the regions on the frame
                    region = obj['region']
                    cv2.rectangle(current_frame, (region[0], region[1]),
                                  (region[2], region[3]), (0, 255, 0), 1)

                if config['snapshots']['show_timestamp']:
                    time_to_show = datetime.datetime.fromtimestamp(
                        frame_time).strftime("%m/%d/%Y %H:%M:%S")
                    cv2.putText(current_frame,
                                time_to_show, (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=.8,
                                color=(255, 255, 255),
                                thickness=2)

                # Draw room tracker area points
                if self.room_tracker is not None:
                    for room_name, c in self.room_tracker.rooms_conf.items():
                        p = (c["point_x"], c["point_y"])
                        cv2.rectangle(current_frame, (p[0] - 10, p[1] - 10),
                                      (p[0] + 10, p[1] + 10), (255, 0, 0), 3)

                ###
                # Set the current frame
                ###
                self.camera_data[camera]['current_frame'] = current_frame

                # delete the previous frame from the plasma store and update the object id
                if self.camera_data[camera]['object_id'] is not None:
                    self.plasma_client.delete(
                        self.camera_data[camera]['object_id'])
                self.camera_data[camera]['object_id'] = f"{camera}{frame_time}"

            ###
            # Maintain the highest scoring recent object and frame for each label
            ###
            for obj in tracked_objects.values():
                # if the object wasn't seen on the current frame, skip it
                if obj['frame_time'] != frame_time:
                    continue
                if obj['label'] in best_objects:
                    now = datetime.datetime.now().timestamp()
                    # if the object is a higher score than the current best score
                    # or the current object is more than 1 minute old, use the new object
                    if obj['score'] > best_objects[obj['label']]['score'] or (
                            now -
                            best_objects[obj['label']]['frame_time']) > 60:
                        obj['frame'] = np.copy(
                            self.camera_data[camera]['current_frame'])
                        best_objects[obj['label']] = obj
                        # send updated snapshot over mqtt
                        best_frame = cv2.cvtColor(obj['frame'],
                                                  cv2.COLOR_RGB2BGR)
                        ret, jpg = cv2.imencode('.jpg', best_frame)
                        if ret:
                            jpg_bytes = jpg.tobytes()
                            self.client.publish(
                                f"{self.topic_prefix}/{camera}/{obj['label']}/snapshot",
                                jpg_bytes,
                                retain=True)
                else:
                    obj['frame'] = np.copy(
                        self.camera_data[camera]['current_frame'])
                    best_objects[obj['label']] = obj

            ###
            # Report over MQTT
            ###
            # count objects by type
            obj_counter = Counter()
            for obj in tracked_objects.values():
                obj_counter[obj['label']] += 1

            # report on detected objects
            for obj_name, count in obj_counter.items():
                new_status = 'ON' if count > 0 else 'OFF'
                if new_status != current_object_status[obj_name]:
                    current_object_status[obj_name] = new_status
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}",
                        new_status,
                        retain=False)
                    # send the best snapshot over mqtt
                    best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                              cv2.COLOR_RGB2BGR)
                    ret, jpg = cv2.imencode('.jpg', best_frame)
                    if ret:
                        jpg_bytes = jpg.tobytes()
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                            jpg_bytes,
                            retain=True)

            # expire any objects that are ON and no longer detected
            expired_objects = [
                obj_name for obj_name, status in current_object_status.items()
                if status == 'ON' and obj_name not in obj_counter
            ]
            for obj_name in expired_objects:
                current_object_status[obj_name] = 'OFF'
                self.client.publish(f"{self.topic_prefix}/{camera}/{obj_name}",
                                    'OFF',
                                    retain=False)
                # send updated snapshot over mqtt
                best_frame = cv2.cvtColor(best_objects[obj_name]['frame'],
                                          cv2.COLOR_RGB2BGR)
                ret, jpg = cv2.imencode('.jpg', best_frame)
                if ret:
                    jpg_bytes = jpg.tobytes()
                    self.client.publish(
                        f"{self.topic_prefix}/{camera}/{obj_name}/snapshot",
                        jpg_bytes,
                        retain=True)

            # report area tracking
            if self.room_tracker is not None:
                for room_name, _ in self.room_tracker.rooms_conf.items():
                    ppl_count = self.room_tracker.get_area_count(room_name)
                    status = "ON" if ppl_count > 0 else "OFF"
                    timestamp = self.room_tracker.get_latest_change_timestamp(
                        room_name)
                    r = {
                        "status": status,
                        "count": ppl_count,
                        "timestamp": timestamp,
                    }
                    # only publish when the room state actually changed
                    if room_name in self.room_tracker_mqtt_state and self.room_tracker_mqtt_state[
                            room_name] == r:
                        continue
                    else:
                        self.client.publish(
                            f"{self.topic_prefix}/{camera}/area/{room_name}",
                            json.dumps(r),
                            retain=False)
                        self.room_tracker_mqtt_state[room_name] = r
Beispiel #11
0
    def get_current_frame(self, draw_options=None):
        """Return a BGR copy of the latest camera frame, optionally annotated.

        Args:
            draw_options: optional dict of boolean flags selecting overlays:
                "bounding_boxes", "regions", "zones", "mask", "motion_boxes",
                "timestamp". Defaults to no overlays.

        Returns:
            A numpy BGR image. The caller owns the copy; the internally
            stored frame is never mutated.
        """
        # Avoid the mutable-default-argument pitfall; an empty dict means
        # "draw nothing extra".
        if draw_options is None:
            draw_options = {}

        # Snapshot all shared state under the lock so drawing operates on a
        # consistent view without holding the lock during cv2 work.
        with self.current_frame_lock:
            frame_copy = np.copy(self._current_frame)
            frame_time = self.current_frame_time
            tracked_objects = {k: v.to_dict() for k, v in self.tracked_objects.items()}
            motion_boxes = self.motion_boxes.copy()
            regions = self.regions.copy()

        # Frames are stored as YUV I420 internally; convert once for drawing.
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
        # draw on the frame
        if draw_options.get("bounding_boxes"):
            # draw the bounding boxes on the frame
            for obj in tracked_objects.values():
                if obj["frame_time"] == frame_time:
                    thickness = 2
                    color = self.config.model.colormap[obj["label"]]
                else:
                    # object not matched on this exact frame: thin blue box
                    thickness = 1
                    color = (255, 0, 0)

                # draw the bounding boxes on the frame
                box = obj["box"]
                draw_box_with_label(
                    frame_copy,
                    box[0],
                    box[1],
                    box[2],
                    box[3],
                    obj["label"],
                    f"{obj['score']:.0%} {int(obj['area'])}",
                    thickness=thickness,
                    color=color,
                )

        if draw_options.get("regions"):
            # green rectangles for the detection regions
            for region in regions:
                cv2.rectangle(
                    frame_copy,
                    (region[0], region[1]),
                    (region[2], region[3]),
                    (0, 255, 0),
                    2,
                )

        if draw_options.get("zones"):
            # zones with an object currently inside are drawn thicker
            for name, zone in self.camera_config.zones.items():
                thickness = (
                    8
                    if any(
                        name in obj["current_zones"] for obj in tracked_objects.values()
                    )
                    else 2
                )
                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)

        if draw_options.get("mask"):
            # black out pixels excluded by the motion mask
            mask_overlay = np.where(self.camera_config.motion.mask == [0])
            frame_copy[mask_overlay] = [0, 0, 0]

        if draw_options.get("motion_boxes"):
            # red rectangles for raw motion detections
            for m_box in motion_boxes:
                cv2.rectangle(
                    frame_copy,
                    (m_box[0], m_box[1]),
                    (m_box[2], m_box[3]),
                    (0, 0, 255),
                    2,
                )

        if draw_options.get("timestamp"):
            # timestamp style (format/color/position) comes from camera config
            color = self.camera_config.timestamp_style.color
            draw_timestamp(
                frame_copy,
                frame_time,
                self.camera_config.timestamp_style.format,
                font_effect=self.camera_config.timestamp_style.effect,
                font_thickness=self.camera_config.timestamp_style.thickness,
                font_color=(color.blue, color.green, color.red),
                position=self.camera_config.timestamp_style.position,
            )

        return frame_copy
Beispiel #12
0
    def get_jpg_bytes(
        self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70
    ):
        """Render this object's thumbnail frame as JPEG bytes.

        Args:
            timestamp: overlay the configured timestamp on the image.
            bounding_box: draw the detection box with label/score/area.
            crop: crop to a 1.1x padded region around the detection box.
            height: if set, resize to this height preserving aspect ratio.
            quality: JPEG quality passed to the encoder.

        Returns:
            JPEG-encoded bytes, or None when no thumbnail exists, its frame
            was evicted from the cache, or encoding fails.
        """
        if self.thumbnail_data is None:
            return None

        try:
            # cached frames are YUV I420; convert once for drawing/encoding
            img = cv2.cvtColor(
                self.frame_cache[self.thumbnail_data["frame_time"]],
                cv2.COLOR_YUV2BGR_I420,
            )
        except KeyError:
            logger.warning(
                f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
            )
            return None

        if bounding_box:
            # outline the detection with its label, score and area
            x_min, y_min, x_max, y_max = (
                self.thumbnail_data["box"][0],
                self.thumbnail_data["box"][1],
                self.thumbnail_data["box"][2],
                self.thumbnail_data["box"][3],
            )
            draw_box_with_label(
                img,
                x_min,
                y_min,
                x_max,
                y_max,
                self.obj_data["label"],
                f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
                thickness=2,
                color=self.colormap[self.obj_data["label"]],
            )

        if crop:
            # expand the detection box by 10% and crop to that region
            box = self.thumbnail_data["box"]
            region = calculate_region(
                img.shape, box[0], box[1], box[2], box[3], 1.1
            )
            img = img[region[1] : region[3], region[0] : region[2]]

        if height:
            # preserve aspect ratio when scaling to the requested height
            scaled_width = int(height * img.shape[1] / img.shape[0])
            img = cv2.resize(
                img, dsize=(scaled_width, height), interpolation=cv2.INTER_AREA
            )
        if timestamp:
            # timestamp style (format/color/position) comes from camera config
            ts_color = self.camera_config.timestamp_style.color
            draw_timestamp(
                img,
                self.thumbnail_data["frame_time"],
                self.camera_config.timestamp_style.format,
                font_effect=self.camera_config.timestamp_style.effect,
                font_thickness=self.camera_config.timestamp_style.thickness,
                font_color=(ts_color.blue, ts_color.green, ts_color.red),
                position=self.camera_config.timestamp_style.position,
            )

        ok, encoded = cv2.imencode(
            ".jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality]
        )
        return encoded.tobytes() if ok else None