Example #1
0
    def detect(
        self,
        img: np.ndarray,
        out_img: "np.ndarray | None" = None,
        color: Color = Color.yellow(),
        line_thickness: int = 2,
    ) -> np.ndarray:
        """
        Detect self.template in img and draw a box around each match.

        :param img: The image to detect the template in
        :param out_img: The image to draw on (should be at least as large as
            img); defaults to a copy of img
        :param color: The color of the bounding box
        :param line_thickness: The thickness of the bounding box line
        :return: out_img
        """
        # NOTE(review): the Color.yellow() default is evaluated once at
        # definition time — fine only if Color instances are immutable; confirm.

        # TODO: Make this scale invariant.
        #  see https://www.pyimagesearch.com/2015/01/26/multi-scale-template-matching-using-python-opencv/
        if out_img is None:
            out_img = img.copy()
        else:
            # Drawing out of bounds would fail, so out_img must cover img in
            # every dimension.  (assert is stripped under `python -O`; kept to
            # match the validation style used elsewhere in this file.)
            assert all(
                img_dim <= out_img_dim
                for img_dim, out_img_dim in zip(img.shape, out_img.shape))

        similarity_map: np.ndarray = self._compute_similarity_map(img)
        match_xy_indices: Iterable[Tuple[
            int, int]] = self._get_match_xy_indices(similarity_map)

        # For each match, draw the bounding box.  Box size is the template's
        # width/height, anchored at the match's top-left corner.
        for x, y in match_xy_indices:
            top_left = x, y
            bottom_right = x + self.temp_w, y + self.temp_h
            cv.rectangle(
                img=out_img,
                pt1=top_left,
                pt2=bottom_right,
                color=color.to_bgr(),
                thickness=line_thickness,
            )

        return out_img
    def track_faces(
        self,
        clip_dir: str,
        out_base_dir: str,
        draw_on_dir: "str | None" = None,
        detect_only: bool = False,
    ):
        """
        Detect, recognize, gender, and track faces across a clip's frames.

        Frames are read from clip_dir in sorted order; each frame is annotated
        with a labeled box per tracked face and written to a newly created
        output directory under out_base_dir.

        :param clip_dir: Directory containing the clip's frame images
        :param out_base_dir: Base directory; a fresh output dir is created in it
        :param draw_on_dir: Optional directory of images to draw on instead of
            the clip frames (must be empty or match clip_dir frame-for-frame)
        :param detect_only: If True, draw raw detection boxes only (no
            recognition, gendering, or tracking)
        :return: Path of the created output directory
        """
        # Setup
        # load image paths
        frames: List[os.DirEntry] = load_and_sort_dir(clip_dir)
        draw_on_frames: List[os.DirEntry] = load_and_sort_dir(draw_on_dir)
        assert len(draw_on_frames) in (0, len(frames))

        # create output directory
        out_dir: str = create_output_dir(out_base_dir)

        # initialize variables required for object tracking
        new_face_id: Iterator[int] = count(start=1)
        tracked_faces: Dict[int, TrackedFace] = {}

        # Iterate Through Video Frames
        for frame, draw_on_frame in zip_longest(frames, draw_on_frames):
            # load new frame
            img = cv.imread(frame.path)

            # load out_img
            out_img: np.ndarray = (img.copy() if draw_on_frame is None else
                                   cv.imread(draw_on_frame.path))

            # ensure out_img is at least as large as img
            assert len(img.shape) == len(out_img.shape) and all(
                out_dim >= in_dim
                for in_dim, out_dim in zip(img.shape, out_img.shape))

            detected_face_boxes: List[Box] = self.detect_face_boxes(img)

            # If tracking is disabled, draw the boxes and move to next frame
            if detect_only:
                write_boxes(
                    out_path=os.path.join(out_dir, frame.name),
                    out_img=out_img,
                    boxes=detected_face_boxes,
                )
                continue

            detected_faces: List[GenderedFace] = gender_faces(
                img=img,
                faces=[
                    self.recognize_face(img, detected_face_box)
                    for detected_face_box in detected_face_boxes
                ],
            )

            current_face_ids: Set[int] = set()
            lost_face_ids: Set[int] = set()

            # Iterate over the known (tracked) faces
            for tracked_face in tracked_faces.values():
                matched_detected_faces: List[GenderedFace] = [
                    detected_face for detected_face in detected_faces
                    if self.faces_match(tracked_face, detected_face)
                ]

                if not matched_detected_faces:
                    # Tracked face was not matched to any detected face
                    # Increment staleness since we didn't detect this face
                    tracked_face.staleness += 1
                    # Update tracker with img and get confidence
                    tracked_confidence: float = tracked_face.tracker.update(
                        img)
                    if (tracked_face.staleness < self.tracking_expiry
                            and tracked_confidence >= self.tracking_threshold):
                        # Assume face is still in frame but we failed to detect
                        # Update box with predicted location box
                        predicted_box: Box = Box.from_dlib_rect(
                            tracked_face.tracker.get_position())
                        tracked_face.box = predicted_box
                        current_face_ids.add(tracked_face.id_)
                    else:
                        # Assume face has left frame because either it is too stale or confidence is too low
                        if self.remember_identities:
                            # Set effectively infinite staleness to force tracker reset if face is found again later
                            tracked_face.staleness = sys.maxsize
                        else:
                            lost_face_ids.add(tracked_face.id_)
                    continue

                # Tracked face was matched to one or more detected faces
                # Multiple matches should rarely happen if faces in frame are distinct. We take closest to prev location
                # TODO: Handle same person multiple times in frame
                matched_detected_face = min(
                    matched_detected_faces,
                    key=lambda face: tracked_face.box.distance_to(face.box),
                )
                # Update tracked_face
                tracked_face.descriptor = matched_detected_face.descriptor
                # BUG FIX: shape was previously assigned the descriptor
                # (cf. the TrackedFace construction below, which uses .shape)
                tracked_face.shape = matched_detected_face.shape
                tracked_face.box = matched_detected_face.box
                if tracked_face.staleness >= self.tracking_expiry:
                    # Face was not present in last frame so reset tracker
                    tracked_face.tracker = dlib.correlation_tracker()
                    tracked_face.tracker.start_track(
                        image=img,
                        bounding_box=tracked_face.box.to_dlib_rect())
                else:
                    # Face was present in last frame so just update guess
                    tracked_face.tracker.update(
                        image=img, guess=tracked_face.box.to_dlib_rect())
                tracked_face.staleness = 0
                tracked_face.gender = matched_detected_face.gender
                tracked_face.gender_confidence = matched_detected_face.gender_confidence
                # Add tracked_face to current_ids to reflect that it is in the frame
                current_face_ids.add(tracked_face.id_)
                # remove matched_detected_face from detected_faces
                detected_faces.remove(matched_detected_face)

            # Delete all faces that were being tracked but are now lost
            # lost_face_ids will always be empty if self.remember_identities is True
            for id_ in lost_face_ids:
                del tracked_faces[id_]

            for new_face in detected_faces:
                # This is a new face (previously unseen)
                id_ = next(new_face_id)
                tracker: dlib.correlation_tracker = dlib.correlation_tracker()
                tracker.start_track(image=img,
                                    bounding_box=new_face.box.to_dlib_rect())
                tracked_faces[id_] = TrackedFace(
                    box=new_face.box,
                    descriptor=new_face.descriptor,
                    shape=new_face.shape,
                    id_=id_,
                    tracker=tracker,
                    gender=new_face.gender,
                    gender_confidence=new_face.gender_confidence,
                )
                current_face_ids.add(id_)

            # Write the annotated frame: one box per currently-visible face,
            # labeled with its persistent id and gender confidence.
            write_boxes(
                out_path=os.path.join(out_dir, frame.name),
                out_img=out_img,
                boxes=[tracked_faces[id_].box for id_ in current_face_ids],
                labelss=[[
                    (
                        f'Person {id_}',
                        Point(3, 14),
                    ),
                    (
                        f'{tracked_faces[id_].gender.name[0].upper()}: {round(100 * tracked_faces[id_].gender_confidence, 1)}%',
                        Point(3, 30),
                    ),
                ] for id_ in current_face_ids],
                color=Color.yellow(),
            )

            print(
                f"Processed {frame.name}.  Currently tracking {len(tracked_faces)} faces"
            )
        return out_dir