Example #1
def start(id, num_detections, detection_queue, event):
  object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue, event)
  start_time = datetime.datetime.now().timestamp()

  frame_times = []
  for _ in range(num_detections):
    start_frame = datetime.datetime.now().timestamp()
    # my_frame is assumed to be a tensor input prepared elsewhere in the benchmark script
    object_detector.detect(my_frame)
    frame_times.append(datetime.datetime.now().timestamp() - start_frame)

  duration = datetime.datetime.now().timestamp() - start_time
  object_detector.cleanup()
  print(f"{id} - Processed for {duration:.2f} seconds.")
  print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
  print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
Example #2
def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
    stop_event = mp.Event()
    def receiveSignal(signalNumber, frame):
        stop_event.set()
    
    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"process:{name}"
    setproctitle(f"frigate.process:{name}")
    listen()

    frame_queue = process_info['frame_queue']
    detection_enabled = process_info['detection_enabled']

    frame_shape = config.frame_shape
    objects_to_track = config.objects.track
    object_filters = config.objects.filters

    motion_detector = MotionDetector(frame_shape, config.motion)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)

    object_tracker = ObjectTracker(config.detect)

    frame_manager = SharedMemoryFrameManager()

    process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)

    logger.info(f"{name}: exiting subprocess")
Example #3
def track_camera(name, config, frame_queue, frame_shape, detection_queue,
                 result_connection, detected_objects_queue, fps, detection_fps,
                 read_start, detection_frame, stop_event):
    print(f"Starting process for {name}: {os.getpid()}")
    listen()

    detection_frame.value = 0.0

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    objects_to_track = camera_objects_config.get('track', [])
    object_filters = camera_objects_config.get('filters', {})

    # load in the mask for object detection
    if 'mask' in config:
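        # the mask may be a base64-encoded image, a 'poly,x1,y1,x2,y2,...' point list,
        # or the name of a grayscale image file under /config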
        if config['mask'].startswith('base64,'):
            img = base64.b64decode(config['mask'][7:])
            npimg = np.frombuffer(img, dtype=np.uint8)
            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
        elif config['mask'].startswith('poly,'):
            points = config['mask'].split(',')[1:]
            contour = np.array([[int(points[i]),
                                 int(points[i + 1])]
                                for i in range(0, len(points), 2)])
            mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
            mask[:] = 255
            cv2.fillPoly(mask, pts=[contour], color=(0))
        else:
            mask = cv2.imread("/config/{}".format(config['mask']),
                              cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None or mask.size == 0:
        mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue, result_connection)

    object_tracker = ObjectTracker(10)

    frame_manager = SharedMemoryFrameManager()

    process_frames(name, frame_queue, frame_shape, frame_manager,
                   motion_detector, object_detector, object_tracker,
                   detected_objects_queue, fps, detection_fps, detection_frame,
                   objects_to_track, object_filters, mask, stop_event)

    print(f"{name}: exiting subprocess")
Example #4
def track_camera(name, camera, ffmpeg_global_config, global_objects_config,
                 detection_queue, detected_objects_queue, fps, skipped_fps,
                 detection_fps):
    info(f"Starting process for {name}: {os.getpid()}")
    # info("name={} config:{} ffmpeg_global_config={} global_objects_config={}  fps={}  skipped_fps={} detection_fps={} ".format(name, config, ffmpeg_global_config, global_objects_config, fps, skipped_fps, detection_fps))
    # Merge the ffmpeg config with the global config
    config = camera.camera_conf
    ffmpeg = config.get('ffmpeg', {})
    ffmpeg_input = camera.live_address
    ffmpeg_global_args = ffmpeg.get('global_args',
                                    ffmpeg_global_config['global_args'])
    ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args',
                                     ffmpeg_global_config['hwaccel_args'])
    ffmpeg_input_args = ffmpeg.get('input_args',
                                   ffmpeg_global_config['input_args'])
    ffmpeg_output_args = ffmpeg.get('output_args',
                                    ffmpeg_global_config['output_args'])
    ffmpeg_cmd = (['ffmpeg'] + ffmpeg_global_args + ffmpeg_hwaccel_args +
                  ffmpeg_input_args + ['-i', ffmpeg_input] +
                  ffmpeg_output_args + ['pipe:'])

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(),
                                      camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {
            **global_object_filters.get(obj, {}),
            **camera_object_filters.get(obj, {})
        }

    expected_fps = config['fps']
    take_frame = config.get('take_frame', 1)

    if 'width' in config and 'height' in config:
        frame_shape = (config['height'], config['width'], 3)
    else:
        frame_shape = get_frame_shape(ffmpeg_input)
        info(frame_shape)

    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]

    try:
        sa.delete(name)
    except Exception:
        # ignore if the shared array does not exist yet
        pass

    frame = sa.create(name, shape=frame_shape, dtype=np.uint8)

    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']),
                          cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None:
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue)

    object_tracker = ObjectTracker(10)

    ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_size)

    plasma_client = plasma.connect("/tmp/plasma")
    frame_num = 0
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    skipped_fps_tracker = EventsPerSecond()
    fps_tracker.start()
    skipped_fps_tracker.start()
    object_detector.fps.start()
    while True:
        start = datetime.datetime.now().timestamp()
        frame_bytes = ffmpeg_process.stdout.read(frame_size)
        duration = datetime.datetime.now().timestamp() - start
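        # running average of how long the frame read blocked, weighted over roughly the last 100 frames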
        avg_wait = (avg_wait * 99 + duration) / 100

        if not frame_bytes:
            rc = ffmpeg_process.poll()
            if rc is not None:
                info(f"{name}: ffmpeg_process exited unexpectedly with {rc}")
                ffmpeg_process = start_or_restart_ffmpeg(
                    ffmpeg_cmd, frame_size, ffmpeg_process)
                time.sleep(10)
            else:
                error(
                    f"{name}: ffmpeg_process is still running but didnt return any bytes"
                )
            continue

        # limit frame rate
        frame_num += 1
        if (frame_num % take_frame) != 0:
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()

        frame_time = datetime.datetime.now().timestamp()

        # Store frame in numpy array
        frame[:] = (np.frombuffer(frame_bytes, np.uint8).reshape(frame_shape))

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        # skip object detection if we are falling behind the expected fps and the read wait was under half the average
        if frame_num > 100 and fps.value < expected_fps - 1 and duration < 0.5 * avg_wait:
            skipped_fps_tracker.update()
            skipped_fps.value = skipped_fps_tracker.eps()
            continue

        skipped_fps.value = skipped_fps_tracker.eps()

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if area(intersection(obj['box'],
                                     motion_box)) / area(motion_box) > .5:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(
            0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [
            calculate_region(frame_shape, motion_boxes[i][0],
                             motion_boxes[i][1], motion_boxes[i][2],
                             motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes
        ]

        # compute tracked object regions
        object_regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest
        ]

        # merge regions with high IOU
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a],
                                              merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(
                    calculate_region(frame_shape, min(a[0], b[0]),
                                     min(a[1], b[1]), max(a[2], b[2]),
                                     max(a[3], b[3]), 1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break

        # resize regions and detect
        detections = []
        for region in merged_regions:

            tensor_input = create_tensor_input(frame, region)

            region_detections = object_detector.detect(tensor_input)

            for d in region_detections:
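                # each detection d is (label, score, box) with box holding normalized
                # (y_min, x_min, y_max, x_max) relative to the square region, so it is
                # scaled and offset back into frame coordinates below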
                box = d[2]
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                       (x_max - x_min) * (y_max - y_min), region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)

        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(
                            tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                                   (x_max - x_min) * (y_max - y_min), region)
                            if filtered(det, objects_to_track, object_filters,
                                        mask):
                                continue
                            selected_objects.append(det)

                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # put the frame in the plasma store
        object_id = hashlib.sha1(str.encode(f"{name}{frame_time}")).digest()
        plasma_client.put(frame, plasma.ObjectID(object_id))
        # add to the queue
        detected_objects_queue.put(
            (name, frame_time, object_tracker.tracked_objects))

    info(f"{name}: exiting subprocess")
Example #5
def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
                 detection_queue, detected_objects_queue, fps, detection_fps,
                 read_start, detection_frame):
    print(f"Starting process for {name}: {os.getpid()}")
    listen()

    detection_frame.value = 0.0

    # Merge the tracked object config with the global config
    camera_objects_config = config.get('objects', {})
    # combine tracked objects lists
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    # merge object filters
    global_object_filters = global_objects_config.get('filters', {})
    camera_object_filters = camera_objects_config.get('filters', {})
    objects_with_config = set().union(global_object_filters.keys(),
                                      camera_object_filters.keys())
    object_filters = {}
    for obj in objects_with_config:
        object_filters[obj] = {
            **global_object_filters.get(obj, {}),
            **camera_object_filters.get(obj, {})
        }

    frame = np.zeros(frame_shape, np.uint8)

    # load in the mask for object detection
    if 'mask' in config:
        mask = cv2.imread("/config/{}".format(config['mask']),
                          cv2.IMREAD_GRAYSCALE)
    else:
        mask = None

    if mask is None:
        mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
        mask[:] = 255

    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
    object_detector = RemoteObjectDetector(name, '/labelmap.txt',
                                           detection_queue)

    camera_tracker_config = config.get('tracker', {
        "min_hits": 1,
        "max_age": 5,
        "iou_threshold": 0.2
    })
    object_tracker = ObjectTracker(camera_tracker_config["min_hits"],
                                   camera_tracker_config["max_age"],
                                   camera_tracker_config["iou_threshold"])

    plasma_client = PlasmaManager()
    avg_wait = 0.0
    fps_tracker = EventsPerSecond()
    fps_tracker.start()
    object_detector.fps.start()
    while True:
        read_start.value = datetime.datetime.now().timestamp()
        frame_time = frame_queue.get()
        duration = datetime.datetime.now().timestamp() - read_start.value
        read_start.value = 0.0
        avg_wait = (avg_wait * 99 + duration) / 100
        detection_frame.value = frame_time

        # Get frame from plasma store
        frame = plasma_client.get(f"{name}{frame_time}")

        if frame is plasma.ObjectNotAvailable:
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()
        detection_fps.value = object_detector.fps.eps()

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_objects = object_tracker.tracked_objects.values()

        # merge areas of motion that intersect with a known tracked object into a single area to look at
        areas_of_interest = []
        used_motion_boxes = []
        for obj in tracked_objects:
            x_min, y_min, x_max, y_max = obj['box']
            for m_index, motion_box in enumerate(motion_boxes):
                if intersection_over_union(motion_box, obj['box']) > .2:
                    used_motion_boxes.append(m_index)
                    x_min = min(obj['box'][0], motion_box[0])
                    y_min = min(obj['box'][1], motion_box[1])
                    x_max = max(obj['box'][2], motion_box[2])
                    y_max = max(obj['box'][3], motion_box[3])
            areas_of_interest.append((x_min, y_min, x_max, y_max))
        unused_motion_boxes = set(range(
            0, len(motion_boxes))).difference(used_motion_boxes)

        # compute motion regions
        motion_regions = [
            calculate_region(frame_shape, motion_boxes[i][0],
                             motion_boxes[i][1], motion_boxes[i][2],
                             motion_boxes[i][3], 1.2)
            for i in unused_motion_boxes
        ]

        # compute tracked object regions
        object_regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in areas_of_interest
        ]

        # merge regions with high IOU
        merged_regions = motion_regions + object_regions
        while True:
            max_iou = 0.0
            max_indices = None
            region_indices = range(len(merged_regions))
            for a, b in itertools.combinations(region_indices, 2):
                iou = intersection_over_union(merged_regions[a],
                                              merged_regions[b])
                if iou > max_iou:
                    max_iou = iou
                    max_indices = (a, b)
            if max_iou > 0.1:
                a = merged_regions[max_indices[0]]
                b = merged_regions[max_indices[1]]
                merged_regions.append(
                    calculate_region(frame_shape, min(a[0], b[0]),
                                     min(a[1], b[1]), max(a[2], b[2]),
                                     max(a[3], b[3]), 1))
                del merged_regions[max(max_indices[0], max_indices[1])]
                del merged_regions[min(max_indices[0], max_indices[1])]
            else:
                break

        # resize regions and detect
        detections = []
        for region in merged_regions:

            tensor_input = create_tensor_input(frame, region)

            region_detections = object_detector.detect(tensor_input)

            for d in region_detections:
                box = d[2]
                size = region[2] - region[0]
                x_min = int((box[1] * size) + region[0])
                y_min = int((box[0] * size) + region[1])
                x_max = int((box[3] * size) + region[0])
                y_max = int((box[2] * size) + region[1])
                det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                       (x_max - x_min) * (y_max - y_min), region)
                if filtered(det, objects_to_track, object_filters, mask):
                    continue
                detections.append(det)

        #########
        # merge objects, check for clipped objects and look again up to N times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        tensor_input = create_tensor_input(frame, region)
                        # run detection on new region
                        refined_detections = object_detector.detect(
                            tensor_input)
                        for d in refined_detections:
                            box = d[2]
                            size = region[2] - region[0]
                            x_min = int((box[1] * size) + region[0])
                            y_min = int((box[0] * size) + region[1])
                            x_max = int((box[3] * size) + region[0])
                            y_max = int((box[2] * size) + region[1])
                            det = (d[0], d[1], (x_min, y_min, x_max, y_max),
                                   (x_max - x_min) * (y_max - y_min), region)
                            if filtered(det, objects_to_track, object_filters,
                                        mask):
                                continue
                            selected_objects.append(det)

                        refining = True
                    else:
                        selected_objects.append(obj)

            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue
        detected_objects_queue.put(
            (name, frame_time, object_tracker.tracked_objects))

    print(f"{name}: exiting subprocess")