import time

import cv2
import face_recognition
import numpy as np

# np_from_json, np_to_json, timer, ORIGINAL_PREFIX and PREDICTED_PREFIX are
# project-local helpers and constants; illustrative sketches of their assumed
# contracts appear at the bottom of this file.


def get_processed_frame_object(frame_obj, scale=1.0):
        """Processes value produced by producer, returns prediction with png image.

        :param frame_obj: frame dictionary with frame information and frame itself
        :param scale: (0, 1] scale image before face recognition, speeds up processing, decreases accuracy
        :return: A dict updated with faces found in that frame, i.e. their location and encoding.
        """

        frame = np_from_json(frame_obj,
                             prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

        if scale != 1:
            # Resize frame of video to scale size for faster face recognition processing
            rgb_small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)

        else:
            rgb_small_frame = frame

        with timer("PROCESS RAW FRAME {}".format(frame_obj["frame_num"])):
            # Find all the faces and face encodings in the current frame of video
            with timer("Locations in frame"):
                face_locations = np.array(
                    face_recognition.face_locations(rgb_small_frame))
                face_locations_dict = np_to_json(face_locations,
                                                 prefix_name="face_locations")

            with timer("Encodings in frame"):
                face_encodings = np.array(
                    face_recognition.face_encodings(rgb_small_frame,
                                                    face_locations))
                face_encodings_dict = np_to_json(face_encodings,
                                                 prefix_name="face_encodings")

        frame_obj.update(face_locations_dict)
        frame_obj.update(face_encodings_dict)

        return frame_obj
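
# A minimal usage sketch (an assumption, not part of the original pipeline):
# build a frame message by hand and run it through get_processed_frame_object.
# It relies on the np_to_json contract sketched at the bottom of this file;
# the name _demo_process_one_frame is hypothetical.
def _demo_process_one_frame():
    demo_frame = np.zeros((240, 320, 3), dtype=np.uint8)  # blank BGR frame
    frame_obj = {"frame_num": 0, "timestamp": str(time.time())}
    frame_obj.update(np_to_json(demo_frame, prefix_name=ORIGINAL_PREFIX))
    # The returned dict carries the original keys plus the face_locations_*
    # and face_encodings_* fields (empty here, since the frame has no faces).
    return get_processed_frame_object(frame_obj, scale=0.5)
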
def get_face_object(frame_obj, query_faces_data, scale=1.0):
        """Match query faces with detected faces in the frame, if matched put box and a tag.
        :param frame_obj: frame dictionary with frame information, frame, face encodings, locations.
        :param query_faces_data: message from query face topic, contains encoding and names of faces.
        Other way was to broadcast raw image and calculate encodings here.
        :param scale: to scale up as 1/scale, if in pre processing frames were scaled down for speedup.
        :return: A dict with modified frame, i.e. bounded box drawn around detected persons face.
        """

        # get frame from message
        frame = np_from_json(frame_obj,
                             prefix_name=ORIGINAL_PREFIX)  # frame_obj = json
        # get processed info from message
        face_locations = np_from_json(frame_obj, prefix_name="face_locations")
        face_encodings = np_from_json(frame_obj, prefix_name="face_encodings")
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_BGR2RGB)

        # get info entered by user through UI
        known_face_encodings = np_from_json(
            query_faces_data,
            prefix_name="known_face_encodings").tolist()  # (n, 128)
        known_faces = np_from_json(query_faces_data,
                                   prefix_name="known_faces").tolist()  # (n, )

        with timer("\nFACE RECOGNITION {}\n".format(frame_obj["frame_num"])):

            # Faces found in this image
            face_names = []
            with timer("Total Match time"):
                for i, face_encoding in enumerate(face_encodings):
                    # See if the face is a match for the known face(s)
                    with timer("Match {}th face time".format(i)):
                        matches = face_recognition.compare_faces(
                            known_face_encodings, face_encoding)

                    name = "Unknown"
                    # If a match was found in known_face_encodings, just use the first one.
                    if True in matches:
                        first_match_index = matches.index(True)
                        name = known_faces[first_match_index]

                    face_names.append(name.title())

            # Mark the results for this frame
            for (top, right, bottom,
                 left), name in zip(face_locations, face_names):
                # Scale face locations back up, since detection ran on a frame
                # that was scaled down by `scale`
                if scale != 1:
                    top = int(top / scale)
                    right = int(right / scale)
                    bottom = int(bottom / scale)
                    left = int(left / scale)

                if name == "Unknown":
                    color = (0, 0, 255)  # blue (the frame is RGB here)
                else:
                    color = (255, 0, 0)  # red

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 21), (right, bottom),
                              color, cv2.FILLED)
                cv2.putText(frame, name, (left + 6, bottom - 6),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)

        # Swap back from RGB to BGR for downstream OpenCV consumers
        frame = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2BGR)
        frame_dict = np_to_json(frame, prefix_name=PREDICTED_PREFIX)
        prediction = None
        if face_names:
            prediction = face_names[0]

        result = {
            "prediction": prediction,
            "predict_time": str(time.time()),
            "latency": str(time.time() - int(frame_obj["timestamp"]))
        }

        frame_obj.update(frame_dict)  # update frame with prediction
        result.update(frame_obj)  # add prediction results

        return result
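
# ---------------------------------------------------------------------------
# Sketches of the project-local helpers used above. These are assumptions
# about their contracts for illustration only; the real pipeline ships its
# own implementations, and the key names and prefix values below are
# hypothetical.
# ---------------------------------------------------------------------------
import base64
from contextlib import contextmanager

ORIGINAL_PREFIX = "original"  # assumed value of the project constant
PREDICTED_PREFIX = "predicted"  # assumed value of the project constant


@contextmanager
def timer(label):
    """Hypothetical stand-in: print how long the wrapped block took."""
    start = time.time()
    try:
        yield
    finally:
        print("{}: {:.3f}s".format(label, time.time() - start))


def np_to_json(arr, prefix_name=""):
    """Hypothetical stand-in: pack an ndarray into JSON-serializable fields."""
    return {
        "{}_data".format(prefix_name): base64.b64encode(
            arr.tobytes()).decode("ascii"),
        "{}_dtype".format(prefix_name): str(arr.dtype),
        "{}_shape".format(prefix_name): list(arr.shape),
    }


def np_from_json(obj, prefix_name=""):
    """Hypothetical stand-in: unpack an ndarray packed by np_to_json."""
    data = base64.b64decode(obj["{}_data".format(prefix_name)])
    arr = np.frombuffer(data, dtype=obj["{}_dtype".format(prefix_name)])
    return arr.reshape(obj["{}_shape".format(prefix_name)])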