def test_image(image_to_check,
               known_names,
               known_face_encodings,
               tolerance=0.6,
               show_distance=False):
    """Compare every face found in *image_to_check* against known encodings.

    Prints one line per matching known face via print_result; prints
    "unknown_person" for a face with no match below *tolerance* and
    "no_persons_found" when the image contains no detectable faces.

    Args:
        image_to_check: path to the image file to analyse.
        known_names: list of person names, parallel to known_face_encodings.
        known_face_encodings: list of 128-d reference encodings.
        tolerance: maximum face distance counted as a match.
        show_distance: forwarded to print_result to include the distance.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings,
                                                   unknown_encoding)
        result = list(distances <= tolerance)

        # Report every known face within tolerance, or "unknown_person" when
        # none matched.  (This reporting was previously commented out, which
        # made the loop a no-op.)
        if True in result:
            for is_match, name, distance in zip(result, known_names,
                                                distances):
                if is_match:
                    print_result(image_to_check, name, distance,
                                 show_distance)
        else:
            print_result(image_to_check, "unknown_person", None,
                         show_distance)

    if not unknown_encodings:
        # Print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None, show_distance)
def test_image(known_names, known_face_encodings, tolerance=0.5625):
    """Capture one frame from the default webcam and verify every face in it.

    Exits the process (sys.exit) when any detected face has no match within
    *tolerance*, or when no face / no frame could be captured at all.

    NOTE(review): this shadows an earlier test_image definition in this
    module — confirm which one callers intend to use.

    Args:
        known_names: list of person names (unused here, kept for interface
            parity with the other test_image variants).
        known_face_encodings: list of 128-d reference encodings.
        tolerance: maximum face distance counted as a match (default keeps
            the original hard-coded 0.5625).
    """
    cam = cv2.VideoCapture(0)
    ret, image_to_check = cam.read()

    cam.release()
    cv2.destroyAllWindows()

    # Original code never checked `ret`; a failed capture yields None and
    # would crash below.  Treat a failed capture like "no face found".
    if not ret:
        sys.exit()

    unknown_image = image_to_check

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings,
                                                   unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            continue
        else:
            # No match
            sys.exit()

    if not unknown_encodings:
        # No faces were found in image
        sys.exit()
def sendVector2(id, status):
    """Encode the first face in test.jpg and send it to the remote server.

    Sends "id:<id>\\r\\nfunction:<status>\\r\\ndata:<encoding>" over TCP,
    prints the server's reply and forwards it to the eel UI.

    Args:
        id: user identifier included in the message.
        status: function/operation code included in the message.
    """
    photo = load_image_file('test.jpg')
    face_vectors = face_encodings(photo)
    # NOTE(review): raises IndexError when test.jpg has no detectable face —
    # confirm callers guarantee a face is present.
    face = 'data:' + str(face_vectors[0])
    function = 'function:' + str(status)
    userid = 'id:' + str(id)
    addr = ('122.51.26.166', 22222)
    # Context manager guarantees the socket is closed even when send/recv
    # raises (the original leaked the socket on error).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(addr)
        msg = userid + '\r\n' + function + '\r\n' + face
        msg = str.encode(msg)
        print(msg)
        sock.send(msg)
        feedback = sock.recv(1024).decode()
    print(feedback)
    eel.alert_feedback(feedback)
def scan_known_people(known_people_folder):
    """Build parallel lists of names and face encodings from a folder of
    reference images.

    Each image file contributes the first face it contains, labelled with
    the file's base name; files with no face are skipped with a warning.

    Returns:
        (known_names, known_face_encodings) — two parallel lists.
    """
    known_names = []
    known_face_encodings = []

    for image_path in image_files_in_folder(known_people_folder):
        person_name = os.path.splitext(os.path.basename(image_path))[0]
        loaded_image = face_recognition.load_image_file(image_path)
        found_encodings = face_recognition.face_encodings(loaded_image)

        if not found_encodings:
            print("WARNING: No faces found in {}. Ignoring file.".format(
                image_path))
            continue

        if len(found_encodings) > 1:
            print(
                "WARNING: More than one face found in {}. Only considering the first face."
                .format(image_path))

        known_names.append(person_name)
        known_face_encodings.append(found_encodings[0])

    return known_names, known_face_encodings
def test_image(image_to_check,
               known_names,
               known_face_encodings,
               tolerance=0.6,
               show_distance=False):
    """Identify every face in *image_to_check* and return the labels.

    For each detected face, the matching known names are reported via
    print_result and the most frequent reported value is kept; faces with no
    match are reported as "unknown".

    Returns:
        list with one entry per detected face (empty when no faces found;
        "no_persons_found" is then reported via print_result instead).
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down giant images so detection/encoding runs faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    faces = []
    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings,
                                                   unknown_encoding)
        result = list(distances <= tolerance)
        res = []
        if True in result:
            # Plain loop: the original abused a list comprehension purely
            # for its append side effect.
            for is_match, name, distance in zip(result, known_names,
                                                distances):
                if is_match:
                    res.append(
                        print_result(image_to_check, name, distance,
                                     show_distance))
        else:
            res.append(
                print_result(image_to_check, "unknown", None, show_distance))
        # Keep the most frequent reported value for this face.  (Removed a
        # dead `defaultdict(int)` that was immediately overwritten.)
        mat = Counter(res)
        res = max(mat.items(), key=lambda x: x[1])
        faces.append(res[0])

    if not unknown_encodings:
        print_result(image_to_check, "no_persons_found", None, show_distance)
    return faces
# Beispiel #6 (example-separator artifact from the scraped source)
def get_face_encoding_in_image(file_stream):
    """Return a JSON-serializable dict describing the first face in an upload.

    Args:
        file_stream: file-like object containing the uploaded image.

    Returns:
        dict with keys:
            "face_found_in_image": bool — whether any face was detected.
            "unknown_face_encodings": str repr of the first face's 128-d
                encoding list, or None when no face was found.
    """
    # Load the uploaded image file
    img = api.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = api.face_encodings(img)

    face_encoding = None
    face_found = False

    if unknown_face_encodings:
        face_found = True
        # Only the first detected face is reported.
        face_encoding = str(list(unknown_face_encodings[0]))
        print(face_encoding)

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "unknown_face_encodings": face_encoding
    }
    return result
# Beispiel #7 (example-separator artifact from the scraped source)
def test_image(image_to_check, known_names, known_face_encodings):
    """Print a "<image>,<name>" line for every known face found in an image.

    Faces with no match are printed as "<image>,unknown_person".

    Args:
        image_to_check: path to the image file to analyse.
        known_names: list of person names, parallel to known_face_encodings.
        known_face_encodings: list of 128-d reference encodings.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    # scipy.misc.imresize was deprecated and removed from SciPy (>=1.3);
    # use PIL instead, preserving the original width-only scaling rule.
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_size = (1600, int(unknown_image.shape[0] * scale_factor))
        pil_img = PIL.Image.fromarray(unknown_image)
        unknown_image = np.array(pil_img.resize(new_size, PIL.Image.LANCZOS))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings,
                                                unknown_encoding)

        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def sendVector(base64_data, id, status):
    """Decode a base64 screenshot, encode its first face, send to the server.

    Writes the decoded image to prtsc.png, extracts the first face encoding,
    sends "id:<id>\\r\\nfunction:<status>\\r\\ndata:<encoding>" over TCP,
    prints the reply and forwards it to the eel UI.

    Args:
        base64_data: data-URL string ("<header>,<base64 payload>").
        id: user identifier included in the message.
        status: function/operation code included in the message.
    """
    base64_data = base64_data.split(',')[1]
    base64_data = str.encode(base64_data)
    data = base64.b64decode(base64_data)
    # `with` guarantees the file handle is closed even if write fails
    # (the original used open/close without protection).
    with open('prtsc.png', 'wb') as image_file:
        image_file.write(data)
    photo = load_image_file('prtsc.png')
    face_vectors = face_encodings(photo)
    # NOTE(review): raises IndexError when the screenshot has no detectable
    # face — confirm callers guarantee a face is present.
    face = 'data:' + str(face_vectors[0])
    function = 'function:' + str(status)
    userid = 'id:' + str(id)
    addr = ('122.51.26.166', 22222)
    # Context manager closes the socket even when send/recv raises
    # (the original leaked the socket on error).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(addr)
        msg = userid + '\r\n' + function + '\r\n' + face
        msg = str.encode(msg)
        print(msg)
        sock.send(msg)
        feedback = sock.recv(1024).decode()
    print(feedback)
    eel.alert_feedback(feedback)
# Beispiel #9 (example-separator artifact from the scraped source)
# Main capture loop: label every recognized face in a live webcam stream.
# Relies on state defined earlier in the file (video_capture,
# process_this_frame, known_face_encodings, known_face_names).
while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which api uses)
    # NOTE(review): this produces a negative-stride view; some dlib-based
    # builds require a contiguous array (.copy()) — confirm it works here.
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = api.face_locations(rgb_small_frame)
        face_encodings = api.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = api.compare_faces(known_face_encodings, face_encoding)
            # Default label is the Chinese for "no match"; kept verbatim as it
            # is a user-visible runtime string.
            name = "无匹配"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    # Toggle so only every other frame does the expensive detection above.
    process_this_frame = not process_this_frame
# Beispiel #10 (example-separator artifact from the scraped source)
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Open the input movie file
input_movie = cv2.VideoCapture("got.mp4")
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))

# Create an output movie file (make sure resolution/frame rate matches input video!)
# The original computed `fourcc` and then passed the magic literal 0x7634706d
# (the little-endian int for 'mp4v') instead — use the variable, built from
# the same 'mp4v' code so the codec is unchanged.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
output_movie = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 360))

# Load some sample pictures and learn how to recognize them.
# Each reference image is assumed to contain exactly one face; [0] takes the
# first (and only) encoding.
dst_image = face_recognition.load_image_file("DST.jpg")
dst_face_encoding = face_recognition.face_encodings(dst_image)[0]

js_image = face_recognition.load_image_file("JS.jpg")
js_face_encoding = face_recognition.face_encodings(js_image)[0]

tl_image = face_recognition.load_image_file("TL.jpg")
tl_face_encoding = face_recognition.face_encodings(tl_image)[0]

sd_image = face_recognition.load_image_file("SD.jpg")
sd_face_encoding = face_recognition.face_encodings(sd_image)[0]

known_faces = [
    dst_face_encoding, js_face_encoding, tl_face_encoding, sd_face_encoding
]

# Initialize some variables
# Beispiel #11 (example-separator artifact from the scraped source)
# Build the KNN training set: one face encoding (X) per single-face training
# image, labelled (y) with the name of the directory it came from.
# Relies on `train_dir` and `image_files_in_folder` defined elsewhere.
X = []
y = []
verbose = False
for class_dir in os.listdir(train_dir):
    if not os.path.isdir(os.path.join(train_dir, class_dir)):
        continue

    # Fixed broken format call: the original was
    # print('starting {}.format', class_dir), which printed the literal
    # string 'starting {}.format' followed by the directory name.
    print('starting {}'.format(class_dir))

    for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
        image = face_recognition.load_image_file(img_path)
        face_bounding_boxes = face_recognition.face_locations(image)

        if len(face_bounding_boxes) != 1:
            # If there are no people (or too many people) in a training image, skip the image.
            if verbose:
                print("Image {} not suitable for training: {}".format(
                    img_path,
                    "Didn't find a face" if len(face_bounding_boxes) < 1 else
                    "Found more than one face"))
        else:
            # Add face encoding for current image to the training set
            X.append(
                face_recognition.face_encodings(
                    image, known_face_locations=face_bounding_boxes)[0])
            y.append(class_dir)
#The prediction part

# Use the KNN model to find the best matches for the test face
#    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
#   are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]