import json
import time

import cv2
import face_recognition
import numpy as np
from flask import redirect, render_template, request, session, url_for

# NOTE: the serialization helpers (np_from_json, np_to_json, timer), the
# prefix/topic constants (ORIGINAL_PREFIX, PREDICTED_PREFIX, TARGET_FACE_TOPIC),
# the Kafka producer broadcast_known_faces, and the upload directory save_dir
# are assumed to be provided by the surrounding project; their import paths are
# not part of this excerpt.


def get_processed_frame_object(frame_obj, scale=1.0):
    """Process a value produced by the producer; return the frame dict augmented with face data.

    :param frame_obj: frame dictionary with frame metadata and the frame itself
    :param scale: scale factor in (0, 1]; scaling the image down before face
        recognition speeds up processing but decreases accuracy
    :return: the dict updated with the faces found in that frame, i.e. their
        locations and encodings
    """

    frame = np_from_json(frame_obj,
                         prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

    if scale != 1:
        # Resize the video frame down by `scale` for faster face recognition
        rgb_small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
    else:
        rgb_small_frame = frame

    with timer("PROCESS RAW FRAME {}".format(frame_obj["frame_num"])):
        # Find all the faces and face encodings in the current frame of video
        with timer("Locations in frame"):
            face_locations = np.array(
                face_recognition.face_locations(rgb_small_frame))
            face_locations_dict = np_to_json(face_locations,
                                             prefix_name="face_locations")

        with timer("Encodings in frame"):
            face_encodings = np.array(
                face_recognition.face_encodings(rgb_small_frame,
                                                face_locations))
            face_encodings_dict = np_to_json(face_encodings,
                                             prefix_name="face_encodings")

    frame_obj.update(face_locations_dict)
    frame_obj.update(face_encodings_dict)

    return frame_obj
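

# A minimal usage sketch (not part of the original excerpt): a kafka-python
# consumer loop feeding frames through get_processed_frame_object and
# forwarding the results. The topic names and the JSON (de)serialization are
# assumptions about the surrounding pipeline.
def _example_consume_loop():
    from kafka import KafkaConsumer, KafkaProducer  # assumed client library

    consumer = KafkaConsumer(
        "frame_objs",  # hypothetical input topic
        value_deserializer=lambda v: json.loads(v.decode("utf-8")))
    producer = KafkaProducer(
        value_serializer=lambda v: json.dumps(v).encode("utf-8"))

    for msg in consumer:
        processed = get_processed_frame_object(msg.value, scale=0.5)
        producer.send("processed_frame_objs", value=processed)  # hypothetical output topic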
def results():
    if request.method == "POST":
        camera_numbers = int(request.form["camera_numbers"])
        return redirect(url_for("get_cameras", camera_numbers=camera_numbers),
                        code=302)

    # redirect to home if no images to display
    if "file_urls" not in session or session["file_urls"] == []:
        return redirect(url_for("index"), code=302)

    # redirect to home if no query faces were provided
    if "known_faces" not in session or session["known_faces"] == []:
        return redirect(url_for("index"), code=302)

    # read the uploaded-file and query-face data from the session (cleared below)
    file_urls = session["file_urls"]
    known_faces = session["known_faces"]
    known_face_encodings = [
        np.array(json.loads(kfe)) for kfe in session["known_face_encodings"]
    ]
    image_file_names = session["image_file_names"]

    print("\n", known_faces, "\n")
    # BROADCAST THE TARGET TO LOOK FOR:
    broadcast_message = np_to_json(np.array(known_face_encodings),
                                   prefix_name="known_face_encodings")
    broadcast_message.update(
        np_to_json(np.array(known_faces), prefix_name="known_faces"))
    broadcast_known_faces.send(TARGET_FACE_TOPIC, value=broadcast_message)

    # loop over the uploaded images (in a real deployment this would be a stream of test images or frames)
    for file_name in image_file_names:

        file_path = "{}/{}".format(save_dir, file_name)

        image = face_recognition.load_image_file(file_path)

        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(image)
        face_encodings = face_recognition.face_encodings(image, face_locations)

        # faces found in this image
        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
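            # tolerance=0.3 is far stricter than face_recognition's default of
            # 0.6: fewer false positives at the cost of more missed matches.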
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding,
                                                     tolerance=0.3)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_faces[first_match_index]

            face_names.append(name.title())

        # draw boxes for this frame
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Draw a box around the face
            color = (0, 0, 255)
            cv2.rectangle(image, (left, top), (right, bottom), color, 2)

            # Draw a label with a name below the face
            cv2.rectangle(image, (left, bottom - 21), (right, bottom), color,
                          cv2.FILLED)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)
            break  # label only the first detected face in each image

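        # Overwrite the uploaded file with the annotated image; cv2.imwrite expects BGR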
        cv2.imwrite(file_path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR))

    session.pop("file_urls", None)
    session.pop("known_faces", None)
    session.pop("known_face_encodings", None)
    session.pop("image_file_names", None)
    session.clear()

    return render_template("results.html",
                           file_urls_names=zip(file_urls, known_faces))
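

# A small sketch (not part of the original source) of the serialization that
# the json.loads(...) in results() implies: an upstream view would store each
# query-face encoding in the session as a JSON string, and results() restores
# it as a numpy array. The helper name here is hypothetical.
def _example_encoding_roundtrip():
    encoding = np.zeros(128)  # face_recognition encodings are 128-d vectors
    stored = json.dumps(encoding.tolist())  # a session["known_face_encodings"] entry
    restored = np.array(json.loads(stored))  # what results() does per entry
    assert restored.shape == (128,)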
def get_face_object(frame_obj, query_faces_data, scale=1.0):
    """Match the query faces against the faces detected in the frame; draw a box and tag on each match.

    :param frame_obj: frame dictionary with frame metadata, the frame, and the face encodings and locations
    :param query_faces_data: message from the query-face topic, containing the encodings and names of the
        query faces (the alternative would have been to broadcast the raw image and compute encodings here)
    :param scale: locations are scaled back up by 1/scale if frames were scaled down during preprocessing
    :return: a dict with the modified frame, i.e. a bounding box drawn around each detected person's face
    """

    # get the frame from the message
    frame = np_from_json(frame_obj,
                         prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
    # get the processed info from the message
    face_locations = np_from_json(frame_obj, prefix_name="face_locations")
    face_encodings = np_from_json(frame_obj, prefix_name="face_encodings")
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

    # get the info entered by the user through the UI
    known_face_encodings = np_from_json(
        query_faces_data,
        prefix_name="known_face_encodings").tolist()  # (n, 128)
    known_faces = np_from_json(query_faces_data,
                               prefix_name="known_faces").tolist()  # (n,)

        with timer("\nFACE RECOGNITION {}\n".format(frame_obj["frame_num"])):

            # Faces found in this image
            face_names = []
            with timer("Total Match time"):
                for i, face_encoding in enumerate(face_encodings):
                    # See if the face is a match for the known face(s)
                    with timer("Match {}th face time".format(i)):
                        matches = face_recognition.compare_faces(
                            known_face_encodings, face_encoding)

                    name = "Unknown"
                    # If a match was found in known_face_encodings, just use the first one.
                    if True in matches:
                        first_match_index = matches.index(True)
                        name = known_faces[first_match_index]

                    face_names.append(name.title())

        # Mark the results for this frame
        for (top, right, bottom,
             left), name in zip(face_locations, face_names):
            # Scale the face locations back up, since detection ran on a frame scaled down by `scale`
            if scale != 1:
                top = int(top / scale)
                right = int(right / scale)
                bottom = int(bottom / scale)
                left = int(left / scale)

            if name == "Unknown":
                color = (0, 0, 255)  # blue (the frame is in RGB order here)
            else:
                color = (255, 0, 0)  # red

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 21), (right, bottom),
                          color, cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)

    # Convert back from RGB to BGR for downstream OpenCV consumers
    frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2BGR)
    frame_dict = np_to_json(frame, prefix_name=PREDICTED_PREFIX)
    prediction = None
    if face_names:
        prediction = face_names[0]

    result = {
        "prediction": prediction,
        "predict_time": str(time.time()),
        "latency": str(time.time() - float(frame_obj["timestamp"]))
    }

    frame_obj.update(frame_dict)  # update the frame with the prediction overlay
    result.update(frame_obj)  # add the prediction results

    return result
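

# A minimal end-to-end sketch (not part of the original source) wiring the two
# processing steps together on one locally captured frame. The webcam capture,
# the timestamp format, and the empty query set are assumptions; np_to_json
# and the prefix constants are the project's own helpers.
def _example_single_frame():
    cap = cv2.VideoCapture(0)  # hypothetical local webcam source
    ok, bgr_frame = cap.read()
    cap.release()
    if not ok:
        return

    frame_obj = {"frame_num": 0, "timestamp": time.time()}
    frame_obj.update(np_to_json(bgr_frame, prefix_name=ORIGINAL_PREFIX))

    # Step 1: detect faces and attach their locations and encodings
    frame_obj = get_processed_frame_object(frame_obj, scale=0.5)

    # Step 2: match against a (here empty) query set and draw the results
    query = np_to_json(np.empty((0, 128)), prefix_name="known_face_encodings")
    query.update(np_to_json(np.array([]), prefix_name="known_faces"))
    result = get_face_object(frame_obj, query, scale=0.5)
    print(result["prediction"], result["latency"])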