Example #1
def capture_frames(ffmpeg_process, camera_name, frame_shape,
                   frame_manager: FrameManager, frame_queue, take_frame: int,
                   fps: EventsPerSecond, skipped_fps: EventsPerSecond,
                   stop_event: mp.Event, current_frame: mp.Value):

    frame_num = 0
    # yuv420p frames carry 1.5 bytes per pixel (full-res Y plane + quarter-res U and V)
    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
    skipped_fps.start()
    while True:
        if stop_event.is_set():
            print(f"{camera_name}: stop event set. exiting capture thread...")
            break

        # read exactly one raw frame from ffmpeg's stdout
        frame_bytes = ffmpeg_process.stdout.read(frame_size)
        current_frame.value = datetime.datetime.now().timestamp()

        if len(frame_bytes) < frame_size:
            print(
                f"{camera_name}: ffmpeg sent a broken frame. something is wrong."
            )

            if ffmpeg_process.poll() is not None:
                print(
                    f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
                )
                break
            else:
                continue

        fps.update()

        frame_num += 1
        # with take_frame == n, keep only every nth frame
        if (frame_num % take_frame) != 0:
            skipped_fps.update()
            continue

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_fps.update()
            continue

        # put the frame in the frame manager
        frame_buffer = frame_manager.create(
            f"{camera_name}{current_frame.value}", frame_size)
        frame_buffer[:] = frame_bytes[:]
        frame_manager.close(f"{camera_name}{current_frame.value}")

        # add to the queue
        frame_queue.put(current_frame.value)
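For context, a hedged sketch of how a capture function like this might be wired up. The ffmpeg flags, camera name, queue size, and stream URL below are illustrative assumptions; frame_manager and EventsPerSecond are taken to come from the surrounding project (a minimal EventsPerSecond sketch follows Example #2):

# illustrative wiring only, not the project's exact setup
import multiprocessing as mp
import subprocess as sp
import threading

frame_shape = (720, 1280)  # (height, width)
frame_size = frame_shape[0] * frame_shape[1] * 3 // 2  # yuv420p

# hypothetical ffmpeg invocation decoding a stream to raw yuv420p on stdout
ffmpeg_cmd = [
    "ffmpeg", "-hide_banner", "-loglevel", "error",
    "-i", "rtsp://example/stream",
    "-f", "rawvideo", "-pix_fmt", "yuv420p", "pipe:",
]
ffmpeg_process = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE, bufsize=frame_size * 10)

frame_queue = mp.Queue(maxsize=2)
stop_event = mp.Event()
current_frame = mp.Value("d", 0.0)
fps = EventsPerSecond()      # assumed project helper
skipped_fps = EventsPerSecond()
fps.start()                  # capture_frames starts skipped_fps itself

threading.Thread(
    target=capture_frames,
    args=(ffmpeg_process, "back_yard", frame_shape, frame_manager,  # frame_manager assumed
          frame_queue, 1, fps, skipped_fps, stop_event, current_frame),
).start()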
Example #2
def capture_frames(ffmpeg_process, camera_name, frame_shape,
                   frame_manager: FrameManager, frame_queue, take_frame: int,
                   fps: mp.Value, skipped_fps: mp.Value,
                   stop_event: mp.Event, current_frame: mp.Value):

    frame_num = 0
    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
    frame_rate = EventsPerSecond()
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()
        if stop_event.is_set():
            print(f"{camera_name}: stop event set. exiting capture thread...")
            break

        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
        frame_buffer = frame_manager.create(frame_name, frame_size)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception:
            print(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")

            if ffmpeg_process.poll() is not None:
                print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
                frame_manager.delete(frame_name)
                break

            # release the buffer allocated for this broken frame before retrying
            frame_manager.delete(frame_name)
            continue

        frame_rate.update()

        frame_num += 1
        if (frame_num % take_frame) != 0:
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # close the frame
        frame_manager.close(frame_name)

        # add to the queue
        frame_queue.put(current_frame.value)
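None of these examples include the EventsPerSecond helper they depend on. Here is a minimal sketch consistent with how it is called (start(), update(), eps()); the 10-second window and the cap on stored timestamps are assumptions, not the project's actual implementation:

import datetime

class EventsPerSecond:
    # minimal sketch inferred from usage; the project's real implementation may differ
    def __init__(self, max_events=1000):
        self._start = None
        self._timestamps = []
        self._max_events = max_events

    def start(self):
        self._start = datetime.datetime.now().timestamp()

    def update(self):
        # record one event, keeping only the most recent max_events timestamps
        self._timestamps.append(datetime.datetime.now().timestamp())
        if len(self._timestamps) > self._max_events:
            self._timestamps = self._timestamps[-self._max_events:]

    def eps(self, last_n_seconds=10):
        if self._start is None:
            self.start()
        now = datetime.datetime.now().timestamp()
        # never divide by a window longer than we have been running
        seconds = min(now - self._start, last_n_seconds)
        recent = len([t for t in self._timestamps if t > now - last_n_seconds])
        return recent / seconds if seconds > 0 else 0.0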
Example #3
def capture_frames(
    ffmpeg_process,
    camera_name,
    frame_shape,
    frame_manager: FrameManager,
    frame_queue,
    fps: mp.Value,
    skipped_fps: mp.Value,
    current_frame: mp.Value,
):

    frame_size = frame_shape[0] * frame_shape[1]
    frame_rate = EventsPerSecond()
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()

        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
        frame_buffer = frame_manager.create(frame_name, frame_size)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception:
            logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")

            if ffmpeg_process.poll() is not None:
                logger.error(
                    f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
                )
                frame_manager.delete(frame_name)
                break

            # release the buffer allocated for this broken frame before retrying
            frame_manager.delete(frame_name)
            continue

        frame_rate.update()

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # close the frame
        frame_manager.close(frame_name)

        # add to the queue
        frame_queue.put(current_frame.value)
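Note that this version computes frame_size without the 3 // 2 factor used in Examples #1 and #2, which suggests the caller passes a frame_shape that already describes the yuv420p buffer, i.e. (height * 3 // 2, width), the same shape process_frames constructs below when reading the frame back. A quick sanity check of the yuv420p arithmetic for a 1280x720 stream:

# yuv420p layout: full-resolution Y plane plus two quarter-resolution chroma planes
height, width = 720, 1280
y_plane = height * width                      # 921600 bytes
uv_planes = 2 * (height // 2) * (width // 2)  # 460800 bytes
assert y_plane + uv_planes == height * width * 3 // 2 == 1382400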
Example #4
def process_frames(camera_name: str,
                   frame_queue: mp.Queue,
                   frame_shape,
                   frame_manager: FrameManager,
                   motion_detector: MotionDetector,
                   object_detector: RemoteObjectDetector,
                   object_tracker: ObjectTracker,
                   detected_objects_queue: mp.Queue,
                   fps: mp.Value,
                   detection_fps: mp.Value,
                   current_frame_time: mp.Value,
                   objects_to_track: List[str],
                   object_filters: Dict,
                   mask,
                   stop_event: mp.Event,
                   exit_on_empty: bool = False):

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    while True:
        if stop_event.is_set() or (exit_on_empty and frame_queue.empty()):
            print(f"Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(f"{camera_name}{frame_time}",
                                  (frame_shape[0] * 3 // 2, frame_shape[1]))

        if frame is None:
            print(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        fps_tracker.update()
        fps.value = fps_tracker.eps()

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        tracked_object_boxes = [
            obj['box'] for obj in object_tracker.tracked_objects.values()
        ]

        # combine motion boxes with known locations of existing objects
        combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

        # compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in combined_boxes
        ]

        # combine overlapping regions
        combined_regions = reduce_boxes(regions)

        # re-compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
            for a in combined_regions
        ]

        # resize regions and detect
        detections = []
        for region in regions:
            detections.extend(
                detect(object_detector, frame, region, objects_to_track,
                       object_filters, mask))

        #########
        # merge objects, check for clipped objects and look again up to 4 times
        #########
        refining = True
        refine_count = 0
        while refining and refine_count < 4:
            refining = False

            # group by name
            detected_object_groups = defaultdict(lambda: [])
            for detection in detections:
                detected_object_groups[detection[0]].append(detection)

            selected_objects = []
            for group in detected_object_groups.values():

                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [(o[2][0], o[2][1], o[2][2] - o[2][0],
                          o[2][3] - o[2][1]) for o in group]
                confidences = [o[1] for o in group]
                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                for index in idxs:
                    # older OpenCV builds return NMS indices as Nx1 arrays, hence index[0]
                    obj = group[index[0]]
                    if clipped(obj, frame_shape):
                        box = obj[2]
                        # calculate a new region that will hopefully get the entire object
                        region = calculate_region(frame_shape, box[0], box[1],
                                                  box[2], box[3])

                        selected_objects.extend(
                            detect(object_detector, frame, region,
                                   objects_to_track, object_filters, mask))

                        refining = True
                    else:
                        selected_objects.append(obj)
            # set the detections list to only include top, complete objects
            # and new detections
            detections = selected_objects

            if refining:
                refine_count += 1

        # now that we have refined our detections, we need to track objects
        object_tracker.match_and_update(frame_time, detections)

        # add to the queue
        detected_objects_queue.put(
            (camera_name, frame_time, object_tracker.tracked_objects))

        detection_fps.value = object_detector.fps.eps()

        frame_manager.close(f"{camera_name}{frame_time}")
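process_frames relies on geometry helpers (reduce_boxes, calculate_region, intersection, area) that are not reproduced on this page. A hedged sketch of the two simplest, assuming boxes are (x_min, y_min, x_max, y_max) tuples as their usage implies; clamping negative extents to zero is a choice made here so the containment test in Example #5 degrades gracefully for disjoint boxes:

def area(box):
    # box assumed to be (x_min, y_min, x_max, y_max)
    return max(0, box[2] - box[0]) * max(0, box[3] - box[1])

def intersection(box_a, box_b):
    # overlapping rectangle of two boxes; degenerate (zero area) when disjoint
    return (
        max(box_a[0], box_b[0]),
        max(box_a[1], box_b[1]),
        min(box_a[2], box_b[2]),
        min(box_a[3], box_b[3]),
    )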
Example #5
def process_frames(
    camera_name: str,
    frame_queue: mp.Queue,
    frame_shape,
    model_shape,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue,
    process_info: Dict,
    objects_to_track: List[str],
    object_filters,
    detection_enabled: mp.Value,
    stop_event,
    exit_on_empty: bool = False,
):

    fps = process_info["process_fps"]
    detection_fps = process_info["detection_fps"]
    current_frame_time = process_info["detection_frame"]

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan_counter = 0

    while not stop_event.is_set():
        if exit_on_empty and frame_queue.empty():
            logger.info(f"Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(
            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
        )

        if frame is None:
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        regions = []

        # if detection is disabled
        if not detection_enabled.value:
            object_tracker.match_and_update(frame_time, [])
        else:
            # get stationary object ids
            # check every Nth frame for stationary objects
            # disappeared objects are not stationary
            # also check for overlapping motion boxes
            stationary_object_ids = [
                obj["id"]
                for obj in object_tracker.tracked_objects.values()
                # if there hasn't been motion for 10 frames
                if obj["motionless_count"] >= 10
                # and it isn't due for a periodic check
                and (
                    detect_config.stationary.interval == 0
                    or obj["motionless_count"] % detect_config.stationary.interval != 0
                )
                # and it hasn't disappeared
                and object_tracker.disappeared[obj["id"]] == 0
                # and it doesn't overlap with any current motion boxes
                and not intersects_any(obj["box"], motion_boxes)
            ]

            # get tracked object boxes that aren't stationary
            tracked_object_boxes = [
                obj["box"]
                for obj in object_tracker.tracked_objects.values()
                if not obj["id"] in stationary_object_ids
            ]

            # combine motion boxes with known locations of existing objects
            combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

            region_min_size = max(model_shape[0], model_shape[1])
            # compute regions
            regions = [
                calculate_region(
                    frame_shape,
                    a[0],
                    a[1],
                    a[2],
                    a[3],
                    region_min_size,
                    multiplier=random.uniform(1.2, 1.5),
                )
                for a in combined_boxes
            ]

            # consolidate regions with heavy overlap
            regions = [
                calculate_region(
                    frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
                )
                for a in reduce_boxes(regions, 0.4)
            ]

            # if starting up, get the next startup scan region
            if startup_scan_counter < 9:
                ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
                ymax = int(frame_shape[0] / 3 + ymin)
                xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
                xmax = int(frame_shape[1] / 3 + xmin)
                regions.append(
                    calculate_region(
                        frame_shape,
                        xmin,
                        ymin,
                        xmax,
                        ymax,
                        region_min_size,
                        multiplier=1.2,
                    )
                )
                startup_scan_counter += 1

            # resize regions and detect
            # seed with stationary objects
            detections = [
                (
                    obj["label"],
                    obj["score"],
                    obj["box"],
                    obj["area"],
                    obj["region"],
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] in stationary_object_ids
            ]

            for region in regions:
                detections.extend(
                    detect(
                        object_detector,
                        frame,
                        model_shape,
                        region,
                        objects_to_track,
                        object_filters,
                    )
                )

            #########
            # merge objects, check for clipped objects and look again up to 4 times
            #########
            refining = len(regions) > 0
            refine_count = 0
            while refining and refine_count < 4:
                refining = False

                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                selected_objects = []
                for group in detected_object_groups.values():

                    # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                    boxes = [
                        (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                        for o in group
                    ]
                    confidences = [o[1] for o in group]
                    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                    for index in idxs:
                        obj = group[index[0]]
                        if clipped(obj, frame_shape):
                            box = obj[2]
                            # calculate a new region that will hopefully get the entire object
                            region = calculate_region(
                                frame_shape,
                                box[0],
                                box[1],
                                box[2],
                                box[3],
                                region_min_size,
                            )

                            regions.append(region)

                            selected_objects.extend(
                                detect(
                                    object_detector,
                                    frame,
                                    model_shape,
                                    region,
                                    objects_to_track,
                                    object_filters,
                                )
                            )

                            refining = True
                        else:
                            selected_objects.append(obj)
                # set the detections list to only include top, complete objects
                # and new detections
                detections = selected_objects

                if refining:
                    refine_count += 1

            # drop detections that overlap too much
            consolidated_detections = []

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                # loop over detections grouped by label
                for group in detected_object_groups.values():
                    # if the group only has 1 item, skip
                    if len(group) == 1:
                        consolidated_detections.append(group[0])
                        continue

                    # sort smallest to largest by area
                    sorted_by_area = sorted(group, key=lambda g: g[3])

                    for current_detection_idx in range(0, len(sorted_by_area)):
                        current_detection = sorted_by_area[current_detection_idx][2]
                        overlap = 0
                        for to_check_idx in range(
                            min(current_detection_idx + 1, len(sorted_by_area)),
                            len(sorted_by_area),
                        ):
                            to_check = sorted_by_area[to_check_idx][2]
                            # if 90% of smaller detection is inside of another detection, consolidate
                            if (
                                area(intersection(current_detection, to_check))
                                / area(current_detection)
                                > 0.9
                            ):
                                overlap = 1
                                break
                        if overlap == 0:
                            consolidated_detections.append(
                                sorted_by_area[current_detection_idx]
                            )
                # now that we have refined our detections, we need to track objects
                object_tracker.match_and_update(frame_time, consolidated_detections)
            # else, just update the frame times for the stationary objects
            else:
                object_tracker.update_frame_times(frame_time)

        # add to the queue if not full
        if detected_objects_queue.full():
            frame_manager.delete(f"{camera_name}{frame_time}")
            continue
        else:
            fps_tracker.update()
            fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_name,
                    frame_time,
                    object_tracker.tracked_objects,
                    motion_boxes,
                    regions,
                )
            )
            detection_fps.value = object_detector.fps.eps()
            frame_manager.close(f"{camera_name}{frame_time}")
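To make the consolidation rule concrete: detections with the same label are sorted smallest to largest by area, and a detection is dropped once at least 90% of it falls inside a larger one. A quick worked example using the helper sketches above:

small = (100, 100, 150, 150)        # area 2500
large = (90, 90, 200, 200)          # fully contains small
inter = intersection(small, large)  # (100, 100, 150, 150)
print(area(inter) / area(small))    # 1.0 > 0.9, so small is consolidated away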