Example #1
        def match_fono(location_, image_):
            top, right, bottom, left = location_
            # You can access the actual face itself like this:
            face_image = image_[top:bottom, left:right]
            try:
                unknown_face_encoding = face_recognition.face_encodings(face_image, num_jitters=NUM_JITTERS)[0]
                recos = face_recognition.compare_faces(
                    self.known_faces_list, unknown_face_encoding, tolerance=TOLERANCE)
            except IndexError:
                # face_encodings returns an empty list when no face is found
                recos = []

            unk = "UNK{n}@unk{n}@00@"
            z = hash(location_)

            face_dict = {
                self.known_faces[index] if is_there else unk.format(n=z): location_ for index, is_there in
                enumerate(recos)}
            # face_dict = {
            #     self.known_faces[index]: location_ for index, is_there in
            #     enumerate(recos) if is_there}
            # print(
            #     "location Locs: {}  Top: {}, Left: {}, Bottom: {}, Right: {}"
            #     .format(face_dict, top, left, bottom, right))
            if any(("@unk" not in name) or ("@Fono-" in name) for name in face_dict.keys()):
                face_dict = {name: location for name, location in face_dict.items()
                             if ("@unk" not in name) or ("@Fono-" in name)}
            else:
                if face_dict:
                    pil_image = Imger.fromarray(face_image)
                    # dict keys are not subscriptable in Python 3; take the first key via iter()
                    pil_image.save("fono17_2/@Fono-{nome}.png".format(nome=next(iter(face_dict))), "PNG")

            print("location Locs: {}  ".format(face_dict))
            return face_dict
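This method leans on attributes and constants defined elsewhere in its class. A minimal sketch of the assumed surroundings, with every name inferred from the call sites (the class name and default values are assumptions, not part of the original):

import face_recognition
from PIL import Image as Imger  # assumption: Imger aliases PIL.Image

NUM_JITTERS = 1   # assumption: re-sampling count passed to face_encodings
TOLERANCE = 0.6   # assumption: the face_recognition default tolerance

class FaceMatcher:  # hypothetical container for the method above
    def __init__(self, known_faces, known_faces_list):
        # known_faces: labels like "name@id@00@", parallel to known_faces_list
        self.known_faces = known_faces
        # known_faces_list: one 128-d encoding per known face
        self.known_faces_list = known_faces_list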
Example #2
def recognize_face(face_file, known_faces_dir):

    os.chdir(known_faces_dir)

    names = []
    encoded_faces = []

    for file in glob.glob("*.jpg"):

        # try to open and read cached data
        # if there is no *.enc file, generate it
        enc_file_name = file[0:-4] + ".enc"
        try:
            encoded = _utils.get_encoded(file, enc_file_name)

        except _utils.FaceNotFoundError:
            continue

        names.append(file)
        encoded_faces.append(encoded)

    unknown_face_file = face_recognition.load_image_file(face_file)

    try:
        unknown_face_encoded = face_recognition.face_encodings(unknown_face_file)[0]
    except IndexError:
        return ""

    results = face_recognition.compare_faces(encoded_faces, unknown_face_encoded)

    for i, matched in enumerate(results):
        if matched:
            return names[i]

    return ""
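The caching helper `_utils.get_encoded` is referenced but not shown. A minimal sketch of what it might contain, assuming the `.enc` cache holds a single 128-d encoding in NumPy format (the exception name and signature come from the call site; the storage format is an assumption):

import numpy as np
import face_recognition

class FaceNotFoundError(Exception):
    """Raised when no face can be located in an image."""

def get_encoded(image_file, enc_file_name):
    # Return the cached encoding if the .enc file already exists
    try:
        with open(enc_file_name, "rb") as f:
            return np.load(f)
    except OSError:
        pass
    # Otherwise compute the encoding once and persist it
    image = face_recognition.load_image_file(image_file)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        raise FaceNotFoundError(image_file)
    with open(enc_file_name, "wb") as f:
        np.save(f, encodings[0])
    return encodings[0]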
Example #3
def face_recognition(frame, drawboxes=True):
    """ Perform face recognition using face_recognition package
    """
    global database, facedatabase, facedatabase_encodings, fraction

    # Define standard found state
    found = False

    # Initialize face database if not already initialized
    if (not database) or (not facedatabase) or (not facedatabase_encodings):
        database = list()
        # Search for known faces in faces/ directory
        for (_, _, filenames) in os.walk('faces'):
            database.extend(filenames)
            break
        # Populate face database and generate face encodings
        facedatabase = [fc.load_image_file(os.path.join('faces', name)) for name in database]
        facedatabase_encodings = [fc.face_encodings(face)[0] for face in facedatabase]
    
    # Create a resized copy of the frame in order to speed up processing
    small_frame = cv2.resize(frame, (0, 0), fx=fraction, fy=fraction)

    # Detect faces and generate their encodings
    face_locations = fc.face_locations(small_frame)
    face_encodings = fc.face_encodings(small_frame, face_locations)

    # Recognize faces if found
    if len(face_encodings) > 0:

        found = True

        # Recognize faces and determine their names
        face_names = []
        for face_encoding in face_encodings:
            match = fc.compare_faces(facedatabase_encodings, face_encoding, tolerance=0.5)
            try:
                name = database[match.index(True)].split('.')[0]
            except ValueError:
                name = "Unknown"
            face_names.append(name)
        
        # Draw a rectangle and name around recognized faces if required
        if drawboxes:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if name != "Unknown":
                    top = int((1/fraction)*top - 16)
                    right = int((1/fraction)*right + 16)
                    bottom = int((1/fraction)*bottom + 16)
                    left = int((1/fraction)*left - 16)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left-1, top - 20), (max(right+1, left+12*len(name)), top), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, top - 6), font, 0.5, (255, 255, 255), 1)
    
    # Return frame and found state
    return frame, found
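A minimal usage sketch for the function above, assuming `fc` aliases the face_recognition package and frames come from a webcam (the capture setup and the initial values of the globals are assumptions):

import os
import cv2
import face_recognition as fc

# Assumed initial state for the globals the function uses
database, facedatabase, facedatabase_encodings = None, None, None
fraction = 0.25  # assumption: quarter-size frames for speed

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame, found = face_recognition(frame, drawboxes=True)
    cv2.imshow('Recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()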
Example #4

def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063,  0.12095481, -0.00436332, -0.07643753,  0.0080383,
                            0.01902981, -0.07184699, -0.09383309,  0.18518871, -0.09588896,
                            0.23951106,  0.0986533 , -0.22114635, -0.1363683 ,  0.04405268,
                            0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                            0.03416885, -0.00267565,  0.09203379,  0.04713435, -0.12731361,
                           -0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
                           -0.06910533, -0.00503746, -0.18466514, -0.09851682,  0.02903969,
                           -0.02174894,  0.02261871,  0.0032102 ,  0.20312519,  0.02999607,
                           -0.11646006,  0.09432904,  0.02774341,  0.22102901,  0.26725179,
                            0.06896867, -0.00490024, -0.09441824,  0.11115381, -0.22592428,
                            0.06230862,  0.16559327,  0.06232892,  0.03458837,  0.09459756,
                           -0.18777156,  0.00654241,  0.08582542, -0.13578284,  0.0150229 ,
                            0.00670836, -0.08195844, -0.04346499,  0.03347827,  0.20310158,
                            0.09987706, -0.12370517, -0.06683611,  0.12704916, -0.02160804,
                            0.00984683,  0.00766284, -0.18980607, -0.19641446, -0.22800779,
                            0.09010898,  0.39178532,  0.18818057, -0.20875394,  0.03097027,
                           -0.21300618,  0.02532415,  0.07938635,  0.01000703, -0.07719778,
                           -0.12651891, -0.04318593,  0.06219772,  0.09163868,  0.05039065,
                           -0.04922386,  0.21839413, -0.02394437,  0.06173781,  0.0292527 ,
                            0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
                            0.01428208, -0.03637431,  0.03971229,  0.13983178, -0.23006812,
                            0.04999552,  0.0108454 , -0.03970895,  0.02501768,  0.08157793,
                           -0.03224047, -0.04502571,  0.0556995 , -0.24374914,  0.25514284,
                            0.24795187,  0.04060191,  0.17597422,  0.07966681,  0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558 ,  0.05307484,
                            0.07417042,  0.07126575,  0.00209804]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
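Since the function returns `jsonify(...)`, it is presumably served from a Flask app. A minimal sketch of the wiring, in the style of the face_recognition web-service demo (the route and upload field name are assumptions):

import face_recognition
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/', methods=['POST'])
def upload_image():
    # Assumption: the client posts the image under the "file" field
    if 'file' not in request.files:
        return jsonify({"error": "no file uploaded"}), 400
    return detect_faces_in_image(request.files['file'])

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5001)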
Example #5

def faceRecognitionFromPicture(cvframe):
    print("---- Recognized Started ----")
    small_frame = cv2.resize(cvframe, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    small_rgb_frame = small_frame[:, :, ::-1]

    # get face location
    face_locations = face_recognition.face_locations(small_rgb_frame)
    print("- Face location scan completed")

    face_encodings = face_recognition.face_encodings(
        small_rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(
            known_face_encodings, face_encoding)
        name = "not recognized"  # default name is not recognized

        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]

        face_names.append(name)
    
        
    print("- Face Locations:")
    # print face data
    print(*face_locations, sep='\n')
    print(*face_names, sep='\n')
    print("- Face name searching completed")
    # draw face rectangle and name on current frame
    drawFaceOnImage(cvframe, face_locations, face_names)
    # Label string
    faceNames = ', '.join(face_names)
    count = str(len(face_locations))
    location = ','.join([str(i) for i in face_locations])
    return_string = "\nNames: "+faceNames + \
        "\nFace Count: "+count+"\nLocations: "+location+"\n"
    lblTag["text"] = return_string
    print("---- Recognized Completed ----")
Example #6
    def process_image(self, image):
        """Process image."""
        # pylint: disable=import-error
        import face_recognition

        fak_file = io.BytesIO(image)
        fak_file.name = 'snapshot.jpg'
        fak_file.seek(0)

        image = face_recognition.load_image_file(fak_file)
        unknowns = face_recognition.face_encodings(image)

        found = []
        for unknown_face in unknowns:
            for name, face in self._faces.items():
                result = face_recognition.compare_faces([face], unknown_face)
                if result[0]:
                    found.append({
                        ATTR_NAME: name
                    })

        self.process_faces(found, len(unknowns))
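The `self._faces` mapping consumed above pairs a name with one known encoding. A sketch of how such a mapping could be built, assuming one reference image per person (the directory layout and helper name are assumptions):

import os
import face_recognition

def load_known_faces(faces_dir):
    # Hypothetical helper: map name -> 128-d encoding, one image per person
    faces = {}
    for filename in os.listdir(faces_dir):
        name, ext = os.path.splitext(filename)
        if ext.lower() not in ('.jpg', '.jpeg', '.png'):
            continue
        image = face_recognition.load_image_file(os.path.join(faces_dir, filename))
        encodings = face_recognition.face_encodings(image)
        if encodings:
            faces[name] = encodings[0]
    return faces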
Example #7
def prepare_images():
    # 0. safety check
    if not os.path.exists(commons.IMAGE_DIR):
        print("ERROR: IMAGE_DIR {} does not exist.".format(commons.IMAGE_DIR))
        return

    if not os.path.exists(commons.TRAIN_IMAGES_DIR):
        os.mkdir(commons.TRAIN_IMAGES_DIR)

    if not os.path.exists(commons.TRAIN_IMAGES_DONE_DIR):
        os.mkdir(commons.TRAIN_IMAGES_DONE_DIR)

    movies = commons.load_movies()
    images = [
        x for x in sorted(os.listdir(commons.IMAGE_DIR))
        if len(x) > 4 and x[-4:] == ".jpg"
    ]
    errors = []

    # 1. categorize images into imdb_id -> character_id -> list of images belonging to that id
    print("Categorizing images...")
    imagesof = defaultdict(lambda: defaultdict(list))
    for x in images:
        # already prepared
        if os.path.exists(os.path.join(commons.TRAIN_IMAGES_DIR, x)):
            continue

        imdb_id, character_id, timestamp = x[:-4].split('-')
        character_id = int(character_id)
        imagesof[imdb_id][character_id].append(x)

    # 2. locate face for each image with black edge cropped
    for imdb_id in imagesof:
        print("Prepare images for <{}> {}...".format(imdb_id,
                                                     movies[imdb_id].name))
        if os.path.exists(os.path.join(commons.TRAIN_IMAGES_DONE_DIR,
                                       imdb_id)):
            continue

        character_encodings = commons.get_characters(imdb_id)

        for character_id in imagesof[imdb_id]:
            for x in imagesof[imdb_id][character_id]:
                try:
                    image = cv2.imread(os.path.join(commons.IMAGE_DIR, x))
                    image = remove_black_edge(image)

                    rgb_image = image[:, :, ::-1]
                    face_locations = face_recognition.face_locations(rgb_image)
                    encodings = face_recognition.face_encodings(
                        rgb_image, known_face_locations=face_locations)
                except Exception as e:
                    print("ERROR {}".format(e))
                    errors.append(e)
                    # face_locations/encodings are not valid for this image; skip it
                    continue

                for i in range(len(face_locations)):
                    try:
                        result = face_recognition.compare_faces(
                            character_encodings[character_id], encodings[i])
                        if face.is_same_person(result):
                            height, _, _ = image.shape
                            _, right, _, left = face_locations[i]
                            mid = int((left + right) / 2)

                            height = height if height % 2 == 0 else height - 1

                            x0 = max(0, mid - int(height / 2))
                            x1 = x0 + height

                            rect_image = image[0:height, x0:x1]
                            cv2.imwrite(
                                os.path.join(commons.TRAIN_IMAGES_DIR, x),
                                rect_image)

                    except Exception as e:
                        print("ERROR {}".format(e))
                        errors.append(e)

        open(os.path.join(commons.TRAIN_IMAGES_DONE_DIR, imdb_id), "a").close()

    for e in errors:
        print(e)
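The `remove_black_edge` helper used above is not shown. A plausible sketch, assuming it crops the letterbox bars around movie frames (the threshold and approach are assumptions):

import numpy as np

def remove_black_edge(image, threshold=10):
    # Hypothetical: drop outer rows/columns whose brightest pixel is near black
    gray = image.mean(axis=2)
    rows = np.where(gray.max(axis=1) > threshold)[0]
    cols = np.where(gray.max(axis=0) > threshold)[0]
    if rows.size == 0 or cols.size == 0:
        return image
    return image[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1]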
Example #8
    r = frame.shape[1] / float(rgb.shape[1])

    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input frame, then compute
    # the facial embeddings for each face
    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []

    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
                                                 encoding,
                                                 tolerance=0.4)
        name = "Unknown"

        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}

            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1

            # determine the recognized face with the largest number of votes
            name = max(counts, key=counts.get)

        names.append(name)
Example #9

    # Now that we know the locations, we can pass them to face_encodings as the second argument.
    # Without that it would search for faces once again, slowing down the whole process.
    encodings = face_recognition.face_encodings(image, locations)

    # We passed our image through face_locations and face_encodings, so we can modify it
    # First we need to convert it from RGB to BGR as we are going to work with cv2
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # But this time we assume that there might be more faces in the image - we can find faces of different people
    print(f', found {len(encodings)} face(s)')
    for face_encoding, face_location in zip(encodings, locations):

        # We use compare_faces (but might use face_distance as well)
        # Returns array of True/False values in order of passed known_faces
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)

        # Since order is preserved, we check if any face was found, then grab the index
        # and the label (name) of the first matching known face within the tolerance
        match = None
        if True in results:  # If at least one is true, get a name of first of found labels
            match = known_names[results.index(True)]
            print(f' - {match} from {results}')

            # Each location contains positions in order: top, right, bottom, left
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])

            # Get color by name using our fancy function
            color = name_to_color(match)
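The comment above notes that `face_distance` could be used instead of `compare_faces`. A minimal sketch of that variant, reusing this example's `known_faces` and `known_names` names:

import numpy as np
import face_recognition

def best_match(known_faces, known_names, face_encoding, tolerance=0.6):
    # face_distance returns one distance per known face; smaller means more similar
    distances = face_recognition.face_distance(known_faces, face_encoding)
    best_index = int(np.argmin(distances))
    if distances[best_index] <= tolerance:
        return known_names[best_index]
    return None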
Example #10
def runImageRec(known_face_encodings, known_face_names, start):
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    print(len(known_face_names))

    with open('dataset_faces.dat', 'rb') as f:
        try:
            all_face_encodings = pickle.load(f)
            known_face_names = list(all_face_encodings.keys())
            known_face_encodings = np.array(list(all_face_encodings.values()))

        except EOFError:
            pass

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        frame = cv2.flip(frame, 1)

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        if process_this_frame:
            end = time.time()
            if (end - start) > 3:
                print((end - start))
                look_for_eye()
            print((end - start))

            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []

            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                try:
                    best_match_index = np.argmin(face_distances)
                except ValueError:
                    print(known_face_names)
                    print("in error name")
                    pass
                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face

                try:
                    if matches[best_match_index]:
                        name = known_face_names[best_match_index]
                    else:
                        if video_capture.isOpened():
                            name = "unknown"
                            print("Enter name")

                        if name == "asking you":
                            #name = input()
                            image_path = r'/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known/' + name + '.PNG'
                            directory = r'/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known'
                            filename = name + '.PNG'
                            cv2.imwrite(image_path, frame)
                            all_face_encodings = {}
                            new_image = face_recognition.load_image_file(
                                "/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known/"
                                + filename)
                            all_face_encodings[
                                name] = face_recognition.face_encodings(
                                    new_image)[0]
                            with open('dataset_faces.dat', 'wb') as f:
                                pickle.dump(all_face_encodings, f)
                            try:
                                os.remove(image_path)
                            except OSError:
                                pass

                except UnboundLocalError:
                    print("in error")
                    if video_capture.isOpened():
                        name = "asking you"
                        print("Enter name")

                    if name == "sparr":
                        image_path = r'/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known/' + name + '.PNG'
                        directory = r'/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known'
                        filename = name + '.PNG'
                        cv2.imwrite(image_path, frame)
                        all_face_encodings = {}
                        new_image = face_recognition.load_image_file(
                            "/Users/JakeODonnell/Desktop/Projekt/Learning-python/faceLiveDetection/img/known/"
                            + filename)
                        all_face_encodings[
                            name] = face_recognition.face_encodings(
                                new_image)[0]

                        with open('dataset_faces.dat', 'wb') as f:
                            pickle.dump(all_face_encodings, f)

                        try:
                            os.remove(image_path)
                        except OSError:
                            pass

                face_names.append(name)

        # DisplayFace processed
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)
            # Only process every other frame of video to save time

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # numpy indexing is [rows, cols], i.e. [top:bottom, left:right]
            roi_gray = gray[top:bottom, left:right]
            roi_color = frame[top:bottom, left:right]

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 2, 5)

            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                              (0, 255, 0), 2)

        # Display the resulting image
        cv2.imshow('Video', frame)
        # Hit 'a' on the keyboard to delete a user and quit
        if cv2.waitKey(1) & 0xFF == ord('a'):
            try:
                user_to_delete = str(input())
                del all_face_encodings[user_to_delete]
            except KeyError:
                print("{user} doesn't exist in database".format(
                    user=user_to_delete))
            break
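A minimal sketch of how the `dataset_faces.dat` pickle loaded above could be created in the first place, assuming a folder holding one image per known person (the folder path is an assumption):

import os
import pickle
import face_recognition

all_face_encodings = {}
known_dir = 'img/known'  # assumption: files named <name>.PNG, one face each
for filename in os.listdir(known_dir):
    name = os.path.splitext(filename)[0]
    image = face_recognition.load_image_file(os.path.join(known_dir, filename))
    encodings = face_recognition.face_encodings(image)
    if encodings:
        all_face_encodings[name] = encodings[0]

with open('dataset_faces.dat', 'wb') as f:
    pickle.dump(all_face_encodings, f)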
Example #11
b, g, r = frame.split()
rgb_frame = Image.merge("RGB", (r, g, b))

rgb_frame = np.array(rgb_frame)
#print(pix.shape)

# Find all the faces and face encodings in the frame of video
face_locations = face_recognition.face_locations(rgb_frame)
face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

# Loop through each face in this frame of video
for (top, right, bottom, left), face_encoding in zip(face_locations,
                                                     face_encodings):
    # See if the face is a match for the known face(s)
    matches = face_recognition.compare_faces(known_face_encodings,
                                             face_encoding,
                                             tolerance=0.508)

    print('MATCHES-FOUND-WITH:')

    total = 0

    for i in range(0, len(matches)):
        if matches[i]:
            print('Name: ', person[known_face_names[i]][1])
            print('Rollno: ', person[known_face_names[i]][0])
            name = str(person[known_face_names[i]][1])
            roll = str(person[known_face_names[i]][0])
            x_co = left
            y_co = bottom + 10 + total * 33
            total = total + 1
Example #12
    def process(self, image_search_path, show=True):
        shrink_scale = 2.0
        image_search = cv2.imread(image_search_path)
        image = cv2.resize(image_search, (0, 0), fx=1.0 / shrink_scale, fy=1.0 / shrink_scale)
        face_locations = face_recognition.face_locations(image)
        if len(face_locations) == 0:
            print('cannot find face in image', image_search_path)
            return None

        face_encodings = face_recognition.face_encodings(image, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces(self.face_encodings, face_encoding)
            name = "Unknown"

            print(match.__class__)
            for idx in range(len(match)):
                if match[idx]:
                    name = self.face_names[idx]
            face_names.append(name)

        face_infos = []
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale the face locations back up, since detection ran on a frame scaled down by shrink_scale
            top *= shrink_scale
            right *= shrink_scale
            bottom *= shrink_scale
            left *= shrink_scale

            locations = {'top': int(top), 'right': int(right), 'bottom': int(bottom), 'left': int(left)}
            face_name = {'name': name}

            face_info = {'location': locations, 'name': face_name}
            face_infos.append(face_info)

            left=int(left)
            top=int(top)
            right=int(right)
            bottom=int(bottom)
            # Draw a box around the face
            cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(image, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            text_org = (left + 6, bottom - 6)

            print('type(image)',type(image))
            print('type(name)',type(name),name)
            print('type(text_org)',type(text_org))
            print('type(font)',type(font))
            cv2.putText(img=image, text=name, org=text_org, fontFace=font, fontScale=1.0, color=(255, 255, 255), thickness=1)

        return_data = {'image_path': image_search_path, 'face_infos': face_infos, 'labeled_image':image}
        print(return_data)

        if show:
            #cv2.imshow('labeled image',image)
            #cv2.waitKey(0)
            #img = mpimg.imread('stinkbug.png')
            imgplot = plt.imshow(image)
            plt.show()

        return return_data
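A brief usage sketch for the method above, assuming the owning class loads `self.face_encodings` and `self.face_names` from known people at construction (the class name and constructor are assumptions; only `process` is shown):

recognizer = FaceSearcher('known_faces/')  # hypothetical constructor
result = recognizer.process('group_photo.jpg', show=False)
if result:
    for info in result['face_infos']:
        print(info['name']['name'], info['location'])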
Example #13
    def recognize(self, detections, image):
        """
        Main face recognition logic, it gets the incoming detection message and
        modifies the person labeled detections according to the face info.

        For example:
         (label) person, bounding_box -> (label) Mario, bounding_box of face

        Args:
        (DetectionArray) detections: detections array message from cob package
        (numpy.ndarray) image: incoming people image

        Returns:

        (numpy.ndarray): image with labels and bounding boxes
        (cob_perception_msgs/DetectionArray): detections with labeled faces

        """

        detections_out = DetectionArray()
        detections_out.header = detections.header

        for i, detection in enumerate(detections.detections):

            if detection.label == "person":

                x = int(detection.mask.roi.x * self.scaling_factor)
                y = int(detection.mask.roi.y * self.scaling_factor)
                width = int(detection.mask.roi.width * self.scaling_factor)
                height = int(detection.mask.roi.height * self.scaling_factor)
                score = detection.score

                try:
                    # Crop detection image
                    detection_image = image[x:x + width, y:y + height]

                    face_locations = fr.face_locations(detection_image)

                    face_features = fr.face_encodings(detection_image, \
                        face_locations)

                    for features, (top, right, bottom, left) in \
                        zip(face_features, face_locations):
                        matches = fr.compare_faces(self.database[0], features)

                        l = y + left
                        t = x + top
                        r = y + right
                        b = x + bottom

                        detection.label = "Unknown"

                        if True in matches:
                            ind = matches.index(True)
                            detection.label = self.database[1][ind]

                        # Modify the message
                        detection.mask.roi.x = l / self.scaling_factor
                        detection.mask.roi.y = t / self.scaling_factor
                        detection.mask.roi.width = (r -
                                                    l) / self.scaling_factor
                        detection.mask.roi.height = (b -
                                                     t) / self.scaling_factor

                        # Draw bounding boxes on current image

                        cv2.rectangle(image, (l, t), \
                        (r, b), (0, 0, 255), 2)

                        cv2.rectangle(image, (x, y), \
                        (x + width, y + height), (255, 0, 0), 3)

                        cv2.putText(image, detection.label, \
                        (l + 2, t + 2), \
                        cv2.FONT_HERSHEY_DUPLEX, 1.0, (0, 0, 0), 1)

                        detections_out.detections.append(detection)

                except Exception as e:
                    print(e)

        return (image, detections_out)
Example #14
import cv2
import face_recognition

known_pic_of_ian = face_recognition.load_image_file("frame8.png")
unknown_pic = face_recognition.load_image_file("frame7.png")

ian_encoding = face_recognition.face_encodings(known_pic_of_ian)[0]
unknown_encoding = face_recognition.face_encodings(unknown_pic)[0]

comparison = face_recognition.compare_faces([ian_encoding], unknown_encoding)
print(comparison)
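Both `face_encodings(...)[0]` calls above raise an `IndexError` when no face is detected. A slightly more defensive version of the same comparison:

import face_recognition

known_pic = face_recognition.load_image_file("frame8.png")
unknown_pic = face_recognition.load_image_file("frame7.png")

known_encodings = face_recognition.face_encodings(known_pic)
unknown_encodings = face_recognition.face_encodings(unknown_pic)

if known_encodings and unknown_encodings:
    comparison = face_recognition.compare_faces([known_encodings[0]], unknown_encodings[0])
    print(comparison)  # e.g. [True] when the faces match
else:
    print("No face found in one of the images")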
Example #15
def detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, data, eyes_detected):
        frame = video_capture.read()
        # resize the frame
        frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        
        # Detect faces
        faces = face_detector.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        # for each detected face
        for (x,y,w,h) in faces:
            # Encode the face into a 128-d embeddings vector
            encoding = face_recognition.face_encodings(rgb, [(y, x+w, y+h, x)])[0]

            # Compare the vector with all known faces encodings
            matches = face_recognition.compare_faces(data["encodings"], encoding)

            # For now we don't know the person name
            name = "Unknown"

            # If there is at least one match:
            if True in matches:
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1


                # determine the recognized face with the largest number of votes
                name = max(counts, key=counts.get)

            face = frame[y:y+h,x:x+w]
            gray_face = gray[y:y+h,x:x+w]

            eyes = []
            
            # Eye detection
            # check first whether the eyes are open (taking glasses into account)
            open_eyes_glasses = open_eyes_detector.detectMultiScale(
                gray_face,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags = cv2.CASCADE_SCALE_IMAGE
            )
            # if open_eyes_glasses detects both eyes, then they are open
            if len(open_eyes_glasses) == 2:
                eyes_detected[name]+= '1'
                for (ex,ey,ew,eh) in open_eyes_glasses:
                    cv2.rectangle(face,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
            
            # otherwise try detecting eyes using left and right_eye_detector
            # which can detect open and closed eyes                
            else:
                # separate the face into left and right sides
                left_face = frame[y:y+h, x+int(w/2):x+w]
                left_face_gray = gray[y:y+h, x+int(w/2):x+w]

                right_face = frame[y:y+h, x:x+int(w/2)]
                right_face_gray = gray[y:y+h, x:x+int(w/2)]

                # Detect the left eye
                left_eye = left_eye_detector.detectMultiScale(
                    left_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags = cv2.CASCADE_SCALE_IMAGE
                )

                # Detect the right eye
                right_eye = right_eye_detector.detectMultiScale(
                    right_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags = cv2.CASCADE_SCALE_IMAGE
                )

                eye_status = '1' # we suppose the eyes are open

                # For each eye, check whether the eye is closed.
                # If one is closed we conclude the eyes are closed
                for (ex,ey,ew,eh) in right_eye:
                    color = (0,255,0)
                    pred = predict(right_face[ey:ey+eh,ex:ex+ew],model)
                    if pred == 'closed':
                        eye_status='0'
                        color = (0,0,255)
                    cv2.rectangle(right_face,(ex,ey),(ex+ew,ey+eh),color,2)
                for (ex,ey,ew,eh) in left_eye:
                    color = (0,255,0)
                    pred = predict(left_face[ey:ey+eh,ex:ex+ew],model)
                    if pred == 'closed':
                        eye_status='0'
                        color = (0,0,255)
                    cv2.rectangle(left_face,(ex,ey),(ex+ew,ey+eh),color,2)
                eyes_detected[name] += eye_status

            # Each time, we check if the person has blinked
            # If yes, we display its name
            if isBlinking(eyes_detected[name],1):
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)
                # Display name
                y = y - 15 if y - 15 > 15 else y + 15
                cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,0.75, (255,255,255), 2)

        return frame
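The `isBlinking` helper called above is not shown. A plausible sketch, assuming `history` is the per-person string of '1'/'0' eye states built up in `eyes_detected`, and a blink is an open-closed-open pattern with at most `maxFrames` closed frames (the body is an assumption inferred from the call site):

def isBlinking(history, maxFrames):
    # Hypothetical: look for '1', then 1..maxFrames '0's, then '1' again
    for i in range(maxFrames):
        pattern = '1' + '0' * (i + 1) + '1'
        if pattern in history:
            return True
    return False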
Example #16
    def face_start(self):
        if self.refresh_facestart_onoff == False:
            return
        # Render the time and date in the top-right corner
        self.label_right_top_time.setText(
            time.strftime("%H:%M", time.localtime(time.time())))
        self.label_right_top_date.setText(
            '[' + time.strftime("%Y-%m-%d", time.localtime(time.time())) + ']')

        #if(int(time.time()) - self.avatar_time > 3):
        #    self.verticalLayoutWidget.hide()
        if self.onoff_face:

            ############# Decide whether to enter sleep mode ##############
            if self.my_sleep_onoff:
                self.my_sleep_end = int(time.time())
                if (int(time.time()) -
                        self.my_sleep_front) > self.my_sleep_time:
                    # While sleeping, show the wake-up tip
                    self.label_shadow_tips.show()
                    self.my_sleep_onoff = False
                    self.timer.stop()
                    self.timer.start(2000)

            self.onoff_face = False
            # Check whether the camera is plugged in; if not, it has to be re-detected,
            # with this entry point closed until the next failed frame read re-opens it
            if self.no_video == True:
                self.no_video = False
                self.video_capture = cv2.VideoCapture(0)
            # Read a frame from the camera
            ret, frame = self.video_capture.read()
            if ret == False:
                self.no_video = True
                self.onoff_face = True
                return

            # Flip the frame horizontally
            frame = cv2.flip(frame, 1)
            # Shrink the camera image: a smaller image means less computation
            #             small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            small_frame = cv2.resize(frame, (0, 0),
                                     fx=self.facefx,
                                     fy=self.facefy)
            #cv2.imwrite('./messigray.png',small_frame)
            # small_frame = frame
            # Only process every other frame of video to save time
            if self.process_this_frame:
                # Use the encodings to decide whether this is the same person: True if so, False if not
                # face_locations is the array of coordinates of the faces captured by the camera
                self.face_locations = face_recognition.face_locations(
                    small_frame)
                # Extract the face data from the frame using those coordinates
                self.face_encodings = face_recognition.face_encodings(
                    small_frame, self.face_locations)
                # Holds the names (or other data) of the recognized faces, as a list
                self.face_names = []
                name = ""

                for face_encoding in self.face_encodings:
                    # Reaching this point means a face appeared on camera, so refresh the sleep-timer bookkeeping
                    if self.my_sleep_onoff == False:
                        self.my_sleep_onoff = True
                        self.timer.stop()
                        self.timer.start(30)
                        # When awake, hide the tip
                        self.label_shadow_tips.hide()
                    # compare_faces looks for similar faces:
                    # arg 1 is the known (local) faces to search against,
                    # arg 2 is the face taken from the camera (if there is none, it simply returns False),
                    # arg 3, tolerance, sets the precision: smaller is stricter, larger is looser; range 0 to 1
                    match = face_recognition.compare_faces(
                        self.person, face_encoding, tolerance=self.tolerance)
                    #                     print(match)

                    self.my_sleep_front = int(time.time())
                    for i in range(len(match)):

                        if match[i]:
                            self.names['is_signto%s' % i] += 1
                            # Get the current time
                            self.names['time_last%s' % i] = int(time.time())
                            # Stash some local temporaries for repeated use below
                            this_uid = self.user_info['result'][i]['uid']
                            this_name = self.user_info['result'][i]['name']
                            this_sex = self.user_info['result'][i]['sex']
                            this_age = str(self.user_info['result'][i]['age'])
                            this_avatar = self.user_info['result'][i]['avatar']
                            this_rank = self.user_info['result'][i]['rank']
                            this_department = self.user_info['result'][i][
                                'department']
                            this_slogan = self.user_info['result'][i]['slogan']
                            # Rework the name value: it now carries everything to display live, not just the name
                            name = this_name +\
                                "\n"+\
                                this_department
                            # Only grant a sign-in after enough consecutive successful recognitions
                            if self.names['is_signto%s' % i] > self.rectimes:
                                # If the first sign-in was less than a set interval ago, no new sign-in is allowed
                                if (self.names['time_last%s' % i] -
                                        self.names['time_first%s' %
                                                   i]) > self.sign_between:
                                    self.names['is_signto%s' % i] = 0
                                    # Sign-in succeeded: store the data in globals for reuse (mostly what the sign-in page needs)
                                    self.this_name = this_name
                                    self.this_avatar = this_avatar
                                    self.this_rank = this_rank
                                    self.this_department = this_department

                                    self.this_time = time.strftime(
                                        "%H:%M",
                                        time.localtime(
                                            self.names['time_last%s' % i]))
                                    self.this_date = time.strftime(
                                        "%Y-%m-%d",
                                        time.localtime(
                                            self.names['time_last%s' % i]))

                                    # Load the sign-in success audio file
                                    pygame.mixer.init(frequency=15500,
                                                      size=-16,
                                                      channels=4)
                                    track = pygame.mixer.music.load(
                                        "./audio/" + this_uid + ".mp3")
                                    pygame.mixer.music.play()
                                    # Sign-in succeeded, trigger the sign-in list animation
                                    self.move_sign_list_pos()
                                    #self.textBrowser.append(this_rank+">"+this_name+":签到成功")
                                    self.label_2.setStyleSheet(
                                        "border-image: url(" + this_avatar +
                                        ");")
                                    #self.label_4.setText(this_name)
                                    # Set the name
                                    self.label_userinfo_all_name.setText(
                                        '姓名:' + this_name)
                                    # Set the sex
                                    self.label_userinfo_all_sex.setText(
                                        '性别:' + this_sex)
                                    # Set the age
                                    self.label_userinfo_all_age.setText(
                                        '年龄:' + this_age)
                                    # Set the rank
                                    self.label_userinfo_all_rank.setText(
                                        '职位:' + this_rank)
                                    # Set the department
                                    self.label_userinfo_all_dept.setText(
                                        '部门:' + this_department)
                                    # Set the slogan
                                    self.label_userinfo_all_slogan.setText(
                                        '签名:' + this_slogan)
                                    # Timer
                                    self.avatar_time = int(time.time())
                                    # Show the avatar panel layout
                                    self.verticalLayoutWidget_2.show()
                                    self.label_userinfo_all_bg.show()
                                    # Store the user's sign-in record
                                    # self.save_sign_info({'signout_t': self.names['time_last%s'%i], 'uid':this_uid})
                                    # After a successful sign-in, update the latest sign-in time for the next comparison
                                    self.names['time_first%s' %
                                               i] = self.names['time_last%s' %
                                                               i]
                                else:
                                    pass

                            # Break out of the loop as soon as someone is recognized;
                            # otherwise the else branch marks low-similarity matches as undefined,
                            # which lowers the false-recognition rate
                            break
                        else:
                            # If a pass fails to recognize, reset the accumulated success count to zero
                            self.names['is_signto%s' % i] = 0
                            # If nobody matched in this pass, the person has not been enrolled,
                            # so they get an "undefined" marker here (via the name variable)
                            name = ""

                    # Save the name (and/or other data) of the currently recognized face
                    self.face_names.append(name)
            self.process_this_frame = not self.process_this_frame

            # Draw the captured faces on screen
            for (top, right, bottom,
                 left), name in zip(self.face_locations, self.face_names):
                # Scale the face locations back up, since detection ran on a downscaled frame
                # These are the corner coordinates of the face box to draw
                top *= self.facescale
                right *= self.facescale
                bottom *= self.facescale
                left *= self.facescale

                top = int(top)
                right = int(right)
                bottom = int(bottom)
                left = int(left)
                #                 cv2.rectangle(frame, (left, top), (right, bottom), (255, 255, 255),  3)
                # From the corners, compute one fifth of the box width for drawing the face-frame lines
                right_left_between = int((right - left) / 5)
                #### Draw the four corner lines
                # Top lines
                cv2.line(frame, (left, top), (left + right_left_between, top),
                         (255, 255, 255), 2)
                cv2.line(frame, (right, top),
                         (right - right_left_between, top), (255, 255, 255), 2)
                # Bottom lines
                cv2.line(frame, (left, bottom),
                         (left + right_left_between, bottom), (255, 255, 255),
                         2)
                cv2.line(frame, (right, bottom),
                         (right - right_left_between, bottom), (255, 255, 255),
                         2)
                # Left lines
                cv2.line(frame, (left, top), (left, top + right_left_between),
                         (255, 255, 255), 2)
                cv2.line(frame, (left, bottom),
                         (left, bottom - right_left_between), (255, 255, 255),
                         2)
                # Right lines
                cv2.line(frame, (right, top),
                         (right, top + right_left_between), (255, 255, 255), 2)
                cv2.line(frame, (right, bottom),
                         (right, bottom - right_left_between), (255, 255, 255),
                         2)
                #                 cv2.rectangle(frame, (left, bottom - 65), (right, bottom), (255, 255, 255), 5)
                #                 font = cv2.FONT_HERSHEY_DUPLEX
                # if frame != None:
                # frame = ft.draw_text(frame, (left+16, bottom-50), name, 34, (255, 255, 255))
                # Call the CJK text-drawing helper; the last parameter is the font size (default 22)
                frame = self.cv2ImgAddText(frame, name, left + 5, bottom - 37,
                                           (255, 255, 255), 12)
            # cv2.imshow('Video', frame)


#                 cv2.putText(frame, name, (left+6, bottom-6), font, 1.0, (255, 255, 255), 1)
#        The crudest way to show the image in real time: write it to a file, then display the file
#         cv2.imwrite('./messigray.png',frame)
#         self.label.setStyleSheet("border-image: url(./messigray.png);")
            show = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                                     QtGui.QImage.Format_RGB888)
            self.label_3.setPixmap(QtGui.QPixmap.fromImage(showImage))
            # Image scaling: label.setScaledContents(True) makes the image auto-fit the label
            self.label_3.setScaledContents(True)
            #             cv2.imshow('Video', frame)
            self.onoff_face = True
        else:
            # print(self.onoff_face)
            pass
        
Example #17

        time.sleep(1)

        #disp.clear()
        #disp.display()

        #time.sleep(1)
        
        face_encodings = face_recognition.face_encodings(output, face_locations)

        # Loop over each face found in the frame to see if it's someone we know.
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces([vinicius_face_encoding], face_encoding)
            # name = "Pessoa Desconhecida"
            if match[0]:
                
                name = "Vinicius Ferreira Ribeiro"
                name1 = 'Vinicius'
                gpio.output(16, gpio.HIGH)
                
                
            else:
               
                name = "Nao Consta no banco de dados."
                name1 = 'Nao Consta'
                gpio.output(25, gpio.HIGH)
Example #18
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodedList.append(encode)
    return encodedList


encodelist_known = findEncodings(images)

img = cv2.imread(IMAGE)

image_test = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
facesCurFrame = face_recognition.face_locations(image_test)
encodeCurFrame = face_recognition.face_encodings(image_test, facesCurFrame)

for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
    match_faces = face_recognition.compare_faces(encodelist_known, encodeFace)
    faceDis = face_recognition.face_distance(encodelist_known, encodeFace)
    #print(match_faces)
    matchIndex = np.argmin(faceDis)

    if match_faces[matchIndex]:
        name = classNames[matchIndex].upper()
        face_loc = faceLoc

cv2.rectangle(img, (face_loc[3], face_loc[0]), (face_loc[1], face_loc[2]),
              (255, 0, 255), 2)  # 0- top, 1- right, 2 - bottom, 3 -left
cv2.putText(img, classNames[matchIndex], (face_loc[3], face_loc[2] + 20),
            cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow('TEST IMAGE', img)
cv2.imshow(classNames[matchIndex].upper(), images[matchIndex])
cv2.waitKey(0)
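The fragment above begins midway through `findEncodings`. A sketch of the complete helper, reconstructed from the loop body shown (it assumes every known image contains exactly one detectable face):

import cv2
import face_recognition

def findEncodings(images):
    # Encode each known image; face_encodings(img)[0] assumes one face per image
    encodedList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodedList.append(encode)
    return encodedList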
Example #19
    if not ret:
        break


    # Reduce frame size to make calculations easier
    small_frame = cv2.resize(image, (0, 0), fx=0.25, fy=0.25)

    # to save cpu, only do calculations once every 3 frames
    if counter % 3 == 0:
        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)

        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces([shan_face_encoding, ojas_face_encoding], face_encoding)
            name = "Unknown"

            if match[0]:
                name = "shantnu"
            elif match[1]:
                name = "ojas"

            face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Because we made the image smaller, now need to multiply by 4 to get correct size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
Example #20

    # Grab a single frame of video
    ret, frame = input_movie.read()
    frame_number += 1

    # Quit when the input video file ends
    if not ret:
        break

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        match = face_recognition.compare_faces(known_faces, face_encoding, tolerance=0.50)

        # If you had more than 2 faces, you could make this logic a lot prettier
        # but I kept it simple for the demo
        name = None
        if match[0]:
            name = "Sheldon Cooper"
        elif match[1]:
            name = "Penny"

        face_names.append(name)

    # Label the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        if not name:
            continue
Example #21
def facialRec():
    subprocess.call(['sh', './whiteLED.sh'])  #Turn the LED white
    #Initialize 'currentname' to trigger only when a new person is identified.
    currentname = "unknown"
    #Determine faces from encodings.pickle file model created from train_model.py
    encodingsP = "encodings.pickle"
    #use this xml file
    cascade = "haarcascade_frontalface_default.xml"

    # load the known faces and embeddings along with OpenCV's Haar
    # cascade for face detection
    #print("[INFO] loading encodings + face detector...")
    data = pickle.loads(open(encodingsP, "rb").read())
    detector = cv2.CascadeClassifier(cascade)

    # initialize the video stream and allow the camera sensor to warm up
    #print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

    # start the FPS counter
    fps = FPS().start()
    valid = False
    user_recognized = False
    # setting the max time for this function to run to 2 mins
    end_time = datetime.datetime.now() + datetime.timedelta(minutes=2)

    while not (user_recognized):
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speedup processing)
        frame = vs.read()
        frame = imutils.resize(frame, width=500)

        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # detect faces in the grayscale frame
        rects = detector.detectMultiScale(gray,
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)

        # OpenCV returns bounding box coordinates in (x, y, w, h) order
        # but we need them in (top, right, bottom, left) order, so we
        # need to do a bit of reordering
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []

        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"],
                                                     encoding)
            name = "Unknown"  #if face is not recognized, then print Unknown

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select first entry in the dictionary)
                name = max(counts, key=counts.get)

                #If someone in your dataset is identified, print their name on the screen
                if currentname != name:
                    currentname = name
                    user_recognized = True
                    print("The user is: " + currentname)

                    if ((currentname == 'Moisess') or (currentname == 'Zach')):
                        #set the valid bit to true
                        valid = True
                        new_end_time = datetime.datetime.now(
                        ) + datetime.timedelta(seconds=30)
                        GPIO.output(6, 1)  # Set the GPIO to the MSP430 high, signaling the user was recognized
                        while True:
                            if (GPIO.input(12)):
                                print(
                                    "Speech and face recognized, changing LEDs to green"
                                )
                                subprocess.call(['sh', './greenLED.sh'
                                                 ])  #Turn the LED Green
                                break
                            if datetime.datetime.now() >= new_end_time:
                                print('inner time limit reached')
                                break
                        time.sleep(60)
                        GPIO.output(6, 0)


                    else:
                        print('Unauthorized user!')
                        subprocess.call(['sh',
                                         './redLED.sh'])  #Turn the LED red
                        time.sleep(2)

                time.sleep(1)

            # update the list of names
            names.append(name)

        # update the FPS counter
        fps.update()
        # exit if the user was identified or the time limit was reached
        if valid or datetime.datetime.now() >= end_time:
            print('user identified or time limit reached')
            break
    # stop the timer and display FPS information
    fps.stop()
    print("Ending facialRec()...")
    subprocess.call(['sh', './offLED.sh'])  #Turn the LED off
    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
    time.sleep(10)
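A distilled version of the voting logic above (count matches per known name, take the majority), shown as a standalone sketch:

def vote_name(matches, known_names):
    # Tally how many known encodings of each person matched, then return the
    # name with the most votes, or "Unknown" when nothing matched.
    counts = {}
    for i, matched in enumerate(matches):
        if matched:
            counts[known_names[i]] = counts.get(known_names[i], 0) + 1
    return max(counts, key=counts.get) if counts else "Unknown"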
Ejemplo n.º 22
0
    crop_img = frame[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    
    smallImg = cv2.resize(crop_img, (0,0), fx=0.25, fy=0.25) 
    rgb_small_frame = smallImg[:, :, ::-1]
    # Only process every fourth frame of video to save time
    if frameCount % 4 == 0:
        frameCount = 0
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
                ai.addPerson(name)
            else:
                ai.foundUnknownPerson()

    frameCount = frameCount + 1

    big = cv2.resize(crop_img, (0,0), fx=5, fy=5) 
    cv2.imshow('Video', big)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Ejemplo n.º 23
0
def compare_choices():
    result = face_recognition.compare_faces([enc1], enc2, 0.5)
    if result[0]:
        canvas.create_text(400, 400, text='The images are of the same person!!!',
                           fill='green', font=('Helvetica', 30))
    else:
        canvas.create_text(400, 400, text='The images are not of the same person!!!',
                           fill='red', font=('Helvetica', 30))
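For context, enc1 and enc2 above come from earlier, unshown lines; they would typically be produced like this (file names are placeholders):

import face_recognition

enc1 = face_recognition.face_encodings(
    face_recognition.load_image_file("choice1.jpg"))[0]  # hypothetical file
enc2 = face_recognition.face_encodings(
    face_recognition.load_image_file("choice2.jpg"))[0]  # hypothetical file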
Ejemplo n.º 24
0
def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # For storing the name corresponding to each encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U',delimiter=',')
    else:
        people = []



# MAIN WORK

    #Capture Video indefinitely
    video_capture = cv2.VideoCapture(0)
    # time.delay(2)
    # TODO: GET FROM DATABASE
    # known encodings of persons in database.
    # known_encodings = []
    # people = []

    #Some important variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    # Main loop: process each frame
    while True:

        # 
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED: 
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame 
        # 
        ret, frame = video_capture.read()

        #smaller frame 1/4th of original size
        small_frame = cv2.resize(frame, (0,0), fx=.25, fy=.25)

        if process_this_frame:
            #Find the face locations
            face_locations = face_recognition.face_locations(small_frame)
            #Find the face encodings 128 Dimensional!!
            face_encodings = face_recognition.face_encodings(small_frame, face_locations)

            face_names=[]
            other = 0 #Count of un-authorised people
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings, face_encoding)
                name = "Unknown"

                #Find if this person is in the present people array
                for i in range(len(match)):
                    if match[i]:
                        name = people[i]
                        break

                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
        
        # Send the names of the people to the parent process
        # os.write(3,b'{"dt" : "This is a test"}')
        print(face_names, flush=True)
            
        process_this_frame = not process_this_frame
        

        #Display the border
        for (top, right, bottom, left),name in zip(face_locations, face_names):
            #Scale up the coordinates by 4 to get face
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            #Assuming person in authenticated
            color =  (0,255,0)  #GREEN
            if not authorised(name):
                #Unauthenticated person
                color = (0,0,255) #RED
                #print so that parent process in Node.js can use it
                print(name,flush=True)

            #Display border
            cv2.rectangle(frame, (left,top), (right,bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left,bottom-35), (right, bottom), color, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name,(left+6, bottom-6), font, 1.0, (255,255,255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Hit ESC on the keyboard to quit
        if cv2.waitKey(100) == 27:
            break
            
    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
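The authorised() helper used above is not shown in this snippet; a minimal sketch, assuming authorised people are exactly those whose names are not "Unknown" placeholders:

def authorised(name):
    # Hypothetical check: treat any non-"Unknown" name from the people
    # array as an authenticated person.
    return not name.startswith("Unknown")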
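Likewise, the two CSV files read at the top of main() have to be produced somewhere; a minimal sketch, assuming one comma-separated 128-d encoding per row and one name per row:

def register_person(image_path, name, encodings_path, people_path):
    # Hypothetical helper: append one encoding row and one name row.
    image = face_recognition.load_image_file(image_path)
    encoding = face_recognition.face_encodings(image)[0]
    with open(encodings_path, "a") as f:
        f.write(",".join(str(v) for v in encoding) + "\n")
    with open(people_path, "a") as f:
        f.write(name + "\n")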
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgElon_test = face_recognition.load_image_file(IMAGE_TEST)
imgElon_test = cv2.cvtColor(imgElon_test, cv2.COLOR_BGR2RGB)

face_location = face_recognition.face_locations(imgElon)[0]
encode_elon = face_recognition.face_encodings(imgElon)[0]
cv2.rectangle(imgElon, (face_location[3],face_location[0]),
              (face_location[1],face_location[2]),(255,0,255),2) # top, right, bottom, left


faceLoc_test = face_recognition.face_locations(imgElon_test)[0]
encode_test = face_recognition.face_encodings(imgElon_test)[0]
cv2.rectangle(imgElon_test,(faceLoc_test[3],faceLoc_test[0]),
              (faceLoc_test[1],faceLoc_test[2]),(255,0,255),2) # top, right, bottom, left

results = face_recognition.compare_faces([encode_elon], encode_test)
faceDis = face_recognition.face_distance([encode_elon], encode_test)
print (results, faceDis)

if results[0]:
    result = ' Match found '
else:
    result = ' No Match '
    
cv2.putText(imgElon, 'Elon Musk' , (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2 )
cv2.putText(imgElon_test, f'{result} {round(faceDis[0],2)}', (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2 )

cv2.imshow('Elon-Musk', imgElon)
cv2.imshow('Elon-test', imgElon_test)
cv2.waitKey(0)
    # encode landmarks for the located faces; the face_recognition API takes
    # the image and (optionally) face locations, not raw landmark points
    # ('image' and 'face_locations' are assumed from the truncated part of
    # this helper)
    landmarks = face_recognition.face_landmarks(image, face_locations)

    return encodings, landmarks


# Get the face encodings for each face in each image file
# Since there could be more than one face in each image, it returns a list of encodings.
# But since I know each image only has one face, I only care about the first encoding in each image, so I grab index 0.
try:
    biden_face_encoding = face_encodings_and_landmarks(biden_image)[0][0]
    obama_face_encoding = face_encodings_and_landmarks(obama_image)[0][0]
    unknown_face_encoding = face_encodings_and_landmarks(unknown_image)[0][0]
except IndexError:
    print(
        "I wasn't able to locate any faces in at least one of the images. Check the image files. Aborting..."
    )
    quit()

known_faces = [biden_face_encoding, obama_face_encoding]

# results is an array of True/False telling if the unknown face matched anyone in the known_faces array
results = face_recognition.compare_faces(known_faces, unknown_face_encoding)

print("Is the unknown face a picture of Biden? {}".format(results[0]))
print("Is the unknown face a picture of Obama? {}".format(results[1]))
print(
    "Is the unknown face a new person that we've never seen before? {}".format(
        True not in results))
# Recognize faces in images and identify who they are
import face_recognition

known_pic = face_recognition.load_image_file("known_pictures/nicole.jpg")
print("what is the face_encoding: ",
      face_recognition.face_encodings(known_pic))
known_face_encoding = face_recognition.face_encodings(known_pic)[0]

unknown_pic = face_recognition.load_image_file("unknown_pictures/unknown1.jpg")
unknown_face_encoding = face_recognition.face_encodings(unknown_pic)[0]
res = face_recognition.compare_faces([known_face_encoding],
                                     unknown_face_encoding)

if res[0]:
    print("This is Nicole!")
else:
    print("This picture is unrecognized! Please try it again!")
imgElon = face_recognition.load_image_file('ImageBasic/Elon Musk.jpg')
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImageBasic/elon musky test.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)

imgMads = face_recognition.load_image_file(
    'ImageAttendance/Mads Mikkelsen.jpg')
imgMads = cv2.cvtColor(imgMads, cv2.COLOR_BGR2RGB)

# place square around detected face
faceLoc = face_recognition.face_locations(imgElon)[0]
encodeElon = face_recognition.face_encodings(imgElon)[0]
#print(faceLoc)
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]),
              (255, 0, 230), 3)

faceLocTest = face_recognition.face_locations(imgTest)[0]
encodeTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]),
              (faceLocTest[1], faceLocTest[2]), (255, 0, 230), 3)

# compare
results = face_recognition.compare_faces([encodeElon], encodeTest)
faceDis = face_recognition.face_distance([encodeElon], encodeTest)
print(results, faceDis)
cv2.putText(imgTest, f'{results} {round(faceDis[0], 2)}', (50, 50),
            cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 2)

cv2.imshow('Elon Muskrat', imgElon)
cv2.imshow('Elon Test', imgTest)
cv2.waitKey(0)
Ejemplo n.º 29
0
        z.append(float(i))
    return np.array(z)

def getfaceencoding(filename):
    image = face_recognition.load_image_file(filename)
    encoding = face_recognition.face_encodings(image)[0]
    x = encoding2string(encoding)
    print(x)
    return x

def comparefaceencodings(e1, e2):
    y = string2encoding(e1)
    z = string2encoding(e2)

    # compare_faces takes a list of known encodings and a single encoding
    results = face_recognition.compare_faces([y], z)
    return results[0]
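The encoding2string() counterpart used by getfaceencoding() above is not shown; a minimal sketch, assuming the string format is simply comma-separated floats (matching the string2encoding fragment at the top of this snippet):

def encoding2string(encoding):
    # Serialize a 128-d numpy encoding as comma-separated floats.
    return ",".join(str(v) for v in encoding)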


def downloadpic(pic_url):
    with open('pic1.jpg', 'wb') as handle:
        response = requests.get(pic_url, stream=True)

        if not response.ok:
            print(response)

        for block in response.iter_content(1024):
            if not block:
                break
            # write each downloaded block to the file (missing in the original)
            handle.write(block)
Ejemplo n.º 30
0
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR (used by OpenCV) to RGB (expected by face_recognition)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current video frame
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # Check whether the face matches one of the known faces
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding)
            name = "Unknown"

            #Se um "match" foi encontrado em Known_face_encodings, usa-se somente o primeiro
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale face locations back up, since detection ran on a 1/4-size frame
        top *= 4
        right *= 4
        bottom *= 4
Ejemplo n.º 31
0
known_face_encodings = [varun_face_encoding, anerudh_face_encoding]
known_face_names = ["Varun", "Anerudh"]

while True:
    ret, frame = video_capture.read()

    rgb_frame = frame[:, :, ::-1]

    face_locations = fr.face_locations(rgb_frame)
    face_encodings = fr.face_encodings(rgb_frame, face_locations)

    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):

        matches = fr.compare_faces(known_face_encodings, face_encoding)

        name = "Unknown"

        face_distances = fr.face_distance(known_face_encodings, face_encoding)

        best_match_index = np.argmin(face_distances)

        if matches[best_match_index]:
            name = known_face_names[best_match_index]

        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 1)
        cv2.putText(frame, name, (left + 6, bottom - 6),
                    cv2.FONT_HERSHEY_TRIPLEX, 1.0, (255, 255, 255), 1)

    # show the frame once per iteration, outside the per-face loop
    cv2.imshow('face_recog', frame)
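The two encodings above come from earlier, unshown lines; they would typically be built like this (file names are placeholders):

varun_face_encoding = fr.face_encodings(fr.load_image_file("varun.jpg"))[0]
anerudh_face_encoding = fr.face_encodings(fr.load_image_file("anerudh.jpg"))[0]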
Ejemplo n.º 32
0
    cv2.imshow("input", img)
    #cv2.imshow("thresholded", imgray*thresh2)

    key = cv2.waitKey(10)
    if key == 27:
        break


cv2.destroyAllWindows()
cv2.VideoCapture(0).release()

picture_of_me = face_recognition.load_image_file("elma.jpg")
my_face_encoding = face_recognition.face_encodings(picture_of_me)[0]

# my_face_encoding now contains a universal 'encoding' of my facial features that can be compared to any other picture of a face!

unknown_picture = face_recognition.load_image_file("IMG_5024.jpg")
unknown_face_encoding = face_recognition.face_encodings(unknown_picture)
Y = len(unknown_face_encoding)

x = 0
# Now we can check whether each detected face matches me with `compare_faces`!
while Y > x:
    results = face_recognition.compare_faces([my_face_encoding], unknown_face_encoding[x])

    if results[0]:
        print("It's a picture of me!", x)
    else:
        print("It's not a picture of me!", x)

    x = x + 1
Ejemplo n.º 33
0
def capture():
    # Get a reference to webcam #0 (the default one)
    video_capture = cv2.VideoCapture(0)

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Rotate 90 degrees
        #frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            # with faceLock:
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                if len(known_face_encodings) == 0:
                    name = "Unknown"
                    face_names.append(name)
                    continue

                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    uploadCapture(frame)

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
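uploadCapture() is external to this excerpt; a minimal local stand-in, assuming the goal is simply to persist the frame in which a known face was matched:

import time
import cv2

def uploadCapture(frame):
    # Hypothetical stand-in: save the matched frame with a timestamped name
    # instead of uploading it anywhere.
    cv2.imwrite("capture_{}.jpg".format(int(time.time())), frame)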
Ejemplo n.º 34
0
def startImageProcessing():
    trueFacesLoc = "/home/surabhiullas/Desktop/HobbyProjects/Face_Recognition/trueData/"

    # Load the label map
    with open("../models/labelmap.txt", 'r') as f:
        labels = [line.strip() for line in f.readlines()]

    # Have to do a weird fix for label map if using the COCO "starter model" from
    # https://www.tensorflow.org/lite/models/object_detection/overview
    # First label is '???', which has to be removed.
    if labels[0] == '???':
        del (labels[0])

    interpreter = Interpreter("../models/detect.tflite")

    interpreter.allocate_tensors()

    # Get model details
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]

    floating_model = (input_details[0]['dtype'] == np.float32)

    input_mean = 127.5
    input_std = 127.5

    # Get a reference to webcam #0 (the default one)
    for camera in glob.glob("/dev/video?"):
        video_capture = cv2.VideoCapture(camera)
        if video_capture.isOpened():
            break

    # Initialize some variables
    known_face_encodings = []
    known_face_names = []
    face_locations = []
    face_encodings = []
    face_names = []

    process_this_frame = True

    previous_fps = 0
    average_fps = 30  # default value

    frame_counter = 0
    presence_counter = 0
    absence_counter = 0
    lock_flag = False
    trueFacesFound = False

    NumTrainImages = sum(len(fs) for _, _, fs in os.walk(trueFacesLoc))

    # Check if Re-training is needed
    if ReTrainingNeeded(NumTrainImages) == False:
        trainData = np.zeros((1, 128))
        for trueImage in glob.iglob(trueFacesLoc + '**/*', recursive=True):
            if "jpg" in trueImage or "png" in trueImage:
                imageName = os.path.join(trueFacesLoc, trueImage)
                image = face_recognition.load_image_file(imageName)
                encoding = face_recognition.face_encodings(image)[0]
                known_face_encodings.append(encoding)
                face_names.append(trueImage)
                encoding = encoding.reshape((1, encoding.shape[0]))
                trainData = np.concatenate((trainData, encoding), axis=0)

        trainData = trainData[1:, ...]
        faceModel = svm.OneClassSVM(kernel="rbf", degree=4,
                                    nu=0.1)  #, gamma=0.05)
        trainSVM(faceModel, trainData)

    else:  # No re-training is needed. Already trained and no update in the training Data. Hence load the pre-trained model
        modelName = '../models/FaceModel.pickle'
        faceModel = utils.ReadPickle(modelName)
        known_face_encodings = utils.ReadPickle("../models/trianedData.pickle")

    # Get the Tick Frequency to calculate FPS
    freq = cv2.getTickFrequency()

    false_face_counter = 0

    # Store and modify password as required to unlock the gnome screen
    set_password()

    pwd = (" ".join(get_password()))

    while True:
        face_label = []  # place holder for the face_labels

        # Start timer (for calculating frame rate)
        t1 = cv2.getTickCount()

        trueFacesFound = False

        # Grab a single frame of video
        try:
            ret, frame = video_capture.read()
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        except:
            continue

        frame_counter += 1

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []

            if len(face_encodings) > 0:
                falseFacesFound = True  # initially assume an unknown face was detected
            else:
                falseFacesFound = False  # no face found, hence no false face

            for face_encoding in face_encodings:

                img = face_encoding.reshape((1, face_encoding.shape[0]))
                prediction = faceModel.predict(img)

                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings,
                                                         face_encoding,
                                                         tolerance=0.6)
                if any(matches) or prediction == 1:
                    falseFacesFound = False
                    absence_counter = 0
                    false_face_counter = 0
                    trueFacesFound = True
                    face_label.append("known")

                    if lock_flag == True:
                        presence_counter += 1
                        if presence_counter > 2:
                            os.popen(
                                'gnome-screensaver-command --deactivate && sleep 15 && xdotool key --delay 50 '
                                + pwd)
                            presence_counter = 0
                            lock_flag = False
                else:
                    face_label.append("unknown")
            """
            If only a False Face is detected, immediately lock the screen.
            If both actual user and false face is detected, then do not lock screen.
            """

            if falseFacesFound == True:
                false_face_counter += 1
                if false_face_counter > 5:
                    os.popen('gnome-screensaver-command --lock')
                    lock_flag = True
                    cv2.imwrite("intruder.jpg", frame)
                    send_intruder_alert("intruder.jpg")
                    print("Intruder alert sent")
                    false_face_counter = 0

            if lock_flag == False and trueFacesFound == False:
                """
                    When Face is not detected, perform Object Detection to Check
                    if the user is really not in place. Because, hand/objects over the
                    face or covering the face partially will make face_recognition
                    library Return 0 faces. In those cases do not increment the
                    absence counter.

                    This technique will not cause any third person to access
                    because, as soon as intruder showcases his wife, the screen
                    will be locked
                """
                lock_waiting_factor = 0.5
                do_not_increment = False
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame_resized = cv2.resize(frame_rgb, (width, height))
                input_data = np.expand_dims(frame_resized, axis=0)

                # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
                if floating_model:
                    input_data = (np.float32(input_data) -
                                  input_mean) / input_std

                # Perform the actual detection by running the model with the image as input
                interpreter.set_tensor(input_details[0]['index'], input_data)
                interpreter.invoke()
                classes = interpreter.get_tensor(output_details[1]['index'])[
                    0]  # Class index of detected objects
                scores = interpreter.get_tensor(output_details[2]['index'])[
                    0]  # Confidence of detected objects
                # Loop over all detections and draw detection box if confidence is above minimum threshold
                for i in range(len(scores)):
                    if ((scores[i] > 0.5) and (scores[i] <= 1.0)):
                        if int(
                                classes[i]
                        ) == 0:  # Check if person is detected, if then do not increment absence counter
                            lock_waiting_factor = 10
                            # do_not_increment = True

                # if do_not_increment == False:
                absence_counter += 1
                if absence_counter > lock_waiting_factor * average_fps:  # roughly lock_waiting_factor seconds of frames
                    os.popen('gnome-screensaver-command --lock')
                    absence_counter = 0
                    lock_flag = True

            t2 = cv2.getTickCount()
            average_fps = freq / (t2 - t1)
            cv2.putText(frame, 'FPS: {0:.2f}'.format(average_fps), (30, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2,
                        cv2.LINE_AA)
            utils.displayResultImage(frame, face_locations, face_label)

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
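trainSVM() is not shown in this snippet; a minimal sketch, assuming it just fits the one-class SVM on the known-face encodings and caches it where the re-use branch expects to find it:

import pickle

def trainSVM(model, data):
    # Fit on the (N, 128) matrix of known-face encodings and persist the model.
    model.fit(data)
    with open('../models/FaceModel.pickle', 'wb') as f:
        pickle.dump(model, f)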
Ejemplo n.º 35
0
encodelistknown = findencodings(images)
print("encoding complete")

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    imgs = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)

    facescurframe = face_recognition.face_locations(imgs)
    encodescurframe = face_recognition.face_encodings(imgs, facescurframe)

    for encodeface, faceloc in zip(encodescurframe, facescurframe):
        matches = face_recognition.compare_faces(encodelistknown, encodeface)
        facedis = face_recognition.face_distance(encodelistknown, encodeface)
        print(facedis)
        matchindex = np.argmin(facedis)

        if matches[matchindex]:
            name = classNames[matchindex].upper()
            print(name)
            y1, x2, y2, x1 = faceloc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (255, 255, 255), 2)
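findencodings() above is not shown; a common shape for it, assuming images is a list of BGR images loaded with cv2 and each image contains exactly one face:

def findencodings(images):
    encodelist = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encodelist.append(face_recognition.face_encodings(img)[0])
    return encodelist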
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
            name = "Unknown"

            if match[0]:
                name = "Barack"

            face_names.append(name)

    process_this_frame = not process_this_frame


    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
Ejemplo n.º 37
0
def main():
  # Initialize processing state
  process_this_frame = True

  while True:
    # Grab a single frame of video
    _, frame = video_capture.read()

    # To save time, process only every other frame
    if process_this_frame:
      # Shrink the frame to 1/4 size in each dimension
      small_frame = cv2.resize(frame, (0,0), fx=0.25, fy=0.25)

      # Find the face locations
      face_locations = face_recognition.face_locations(small_frame)

      # Encode the detected faces
      face_encodings = face_recognition.face_encodings(small_frame, face_locations)

      # Initialize the list of names
      face_names = []

      for face_encoding in face_encodings:
        # Check whether the face matches a registered face
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding, threshold)
        name = "Unknown"

        # Use the closest registered face as the candidate
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
          name = known_face_names[best_match_index]

        face_names.append(name)

    # Toggle the processing flag
    process_this_frame = not process_this_frame

    # Display the face locations
    # for (top, right, bottom, left) in face_locations:
    for (top, right, bottom, left),name in zip(face_locations, face_names):

      # Scale coordinates back up to the original frame size
      top *= 4
      right *= 4
      bottom *= 4
      left *= 4

      # Draw a box around the face region
      cv2.rectangle(frame, (left, top), (right, bottom), (0,0,255), 2)

      # Draw a filled box below the face region
      cv2.rectangle(frame, (left, bottom-35), (right, bottom), (0, 0, 255), cv2.FILLED)
      # font = cv2.FONT_HERSHEY_COMPLEX
      # cv2.putText(frame, name, (left + 6, bottom -6), font, 1.0, (255, 255, 255), 1)

      # Render the name with a TrueType font so Japanese text displays correctly
      fontpath = 'klee.ttc'
      font = ImageFont.truetype(fontpath, 32)
      img_pil = Image.fromarray(frame)
      draw = ImageDraw.Draw(img_pil)
      position = (left + 6, bottom - 40)

      # Write the name onto the image
      draw.text(position, name, font=font, fill=(255,255,255,0))
      frame = np.array(img_pil)

      # Identity verification
      if mode == 1 and name != "Unknown":
        check_password(name)

    # Show the result frame
    cv2.imshow('Video', frame)

    # Exit on the ESC key
    if cv2.waitKey(1) == 27:
      break
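check_password() is external to this excerpt; a minimal sketch, assuming mode 1 asks the recognized user to confirm identity with a password (the registered_passwords dict is hypothetical):

def check_password(name):
    # Hypothetical second factor after a face match.
    entered = input("Password for {}: ".format(name))
    if entered != registered_passwords.get(name):  # hypothetical lookup table
        print("Password mismatch for {}".format(name))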
import face_recognition

# Load the jpg files into numpy arrays
biden_image = face_recognition.load_image_file("biden.jpg")
obama_image = face_recognition.load_image_file("obama.jpg")
unknown_image = face_recognition.load_image_file("obama2.jpg")

# Get the face encodings for each face in each image file
# Since there could be more than one face in each image, it returns a list of encodings.
# But since I know each image only has one face, I only care about the first encoding in each image, so I grab index 0.
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]

known_faces = [
    biden_face_encoding,
    obama_face_encoding
]

# results is an array of True/False telling if the unknown face matched anyone in the known_faces array
results = face_recognition.compare_faces(known_faces, unknown_face_encoding)

print("Is the unknown face a picture of Biden? {}".format(results[0]))
print("Is the unknown face a picture of Obama? {}".format(results[1]))
print("Is the unknown face a new person that we've never seen before? {}".format(not True in results))
Ejemplo n.º 39
0
    frame = vs.read()
    # convert the input frame from BGR to RGB then resize it to have
    # a width of 750px (to speedup processing)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = imutils.resize(rgb, width=750)
    r = frame.shape[1] / float(rgb.shape[1])
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input frame, then compute
    # the facial embeddings for each face
    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []

    for encoding in encodings:
        matches = face_recognition.compare_faces(data["encodings"], encoding)
        name = "Unknown"
        # find the indexes of all matched faces then initialize a dictionary to count the total number of times each face was matched
        if True in matches:
            # we need to determine the indexes of where these True  values are in matches, we construct a simple list of matchedIdxs
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # determine the recognized face with the largest number of votes
            # (in the event of an unlikely tie Python selects the first entry)
            name = max(counts, key=counts.get)
        names.append(name)

    for ((top, right, bottom, left), name) in zip(boxes, names):
        # rescale the face coordinates
Ejemplo n.º 40
0
def stream_response_generator():
    # create a webcam capture object
    i=1
    video_capture=cv2.VideoCapture(0)
    while True:
        ret, frame = video_capture.read()
        # cv2.imwrite("test.jpg", frame)
        # frame_number += 1
        face_locations = face_recognition.face_locations(frame)
        face_encodings = face_recognition.face_encodings(frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            match = face_recognition.compare_faces(face_data, face_encoding, tolerance=0.50)
            name = None
            if True in match:
                first_match_index = match.index(True)
                name = face_name_list[first_match_index]
                if('aid_' in name):
                    try:
                        data=select.select_user(name)
                        print ("*"*60)
                        print ("\t\t::USER DISCRIPTIONS::")
                        print ("*"*60)
                        print ("USER ID  :",data[1])
                        time=str(data[2])
                        print ("RECOGNIZED TIME :",time[11:],time[0:11])
                        print ("*"*60,'\n\n')
                        current_time=datetime.now()
                        update.update_time_stamp(current_time,name)
                    except:
                        print("EXCEPTIONS:",name)
                    # print(name)
            else:
                # name="unknow"
                gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
                faces=face_cascade.detectMultiScale(gray,1.3,5)
                for (x, y, w, h) in faces:
                    # detectMultiScale returns (x, y, w, h); crop around the face with a margin
                    fram=frame[y-50:y+h+50,x-50:x+w+100]
                    filename="iSee/static/temp.jpg"
                    out = cv2.imwrite(filename, fram)
                    face_locations = face_recognition.face_locations(fram)
                    face_encodings = face_recognition.face_encodings(fram, face_locations)
                    for face_encoding in face_encodings:
                        match = face_recognition.compare_faces(face_data, face_encoding, tolerance=0.50)
                        name = None
                        if True in match:
                            first_match_index = match.index(True)
                            name = face_name_list[first_match_index]
                        else:
                            current_time=datetime.now()
                            id=insert.create_new_user(current_time)
                            filename='iSee/static/img_data/wait_list/'+id+'.jpg'
                            out = cv2.imwrite(filename, fram)               
                            # out = cv2.imwrite(filename, frame)
                            image = face_recognition.load_image_file(filename)
                            data = face_recognition.face_encodings(image)[0]
                            face_data.append(data)
                            face_name_list.append(id)   
                            row = [id]
                            for i in data:
                                row.append(float(i))
                            with open('iSee/static/trained_data/user_face_info.csv', 'a') as csvFile:
                                writer = csv.writer(csvFile)
                                writer.writerow(row)
                            csvFile.close()
                            print ("new face recognized Time:",current_time)
                            data=select.select_user(id)
                            os.system(' telegram-cli -k server.pub -W -e "msg Alertsystem WARNING: A NEW PERSON HAS ENTERED !!!!  " "safe_quit" ')
                            os.system(' telegram-cli -k server.pub -W -e "send_photo Alertsystem %s" "safe_quit"' %(filename) )
                            os.system(' telegram-cli -k server.pub -W -e "msg Alertsystem NEW USER_ID: %s " "safe_quit" '%(id))
                            os.system(' telegram-cli -k server.pub -W -e "msg Alertsystem Accept : http://%s:%s/iSee/accept/%s " "safe_quit" '%(local_ip,port,id))
                            os.system(' telegram-cli -k server.pub -W -e "msg Alertsystem Reject : http://%s:%s/iSee/reject/%s " "safe_quit" '%(local_ip,port,id))
            face_names.append(name)
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if not name:
                    continue
                if('wid_'in name):
                    c=(0,165,255)
                elif('aid_'in name):
                    c=(34,139,34)
                else:
                    c= (0, 0, 255)
                cv2.rectangle(frame, (left, top), (right, bottom),c, 2)
                crop_img = frame[top:bottom, left:right]
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), c, cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        imgencode=cv2.imencode('.jpg',frame)[1]
        stringData=imgencode.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n'+stringData+b'\r\n')
        i+=1
def findface():
    import face_recognition
    import cv2
    import time
    import numpy as np
    from logfile import timee
    from outtime import timeout
    import datetime
    x = str(datetime.datetime.now())
    date = (x.split(" ",)[0])
    video_capture = cv2.VideoCapture(0)
    employee_face = ["obama.jpg","harsh.jpg"]
    obama_image = face_recognition.load_image_file(employee_face[0])
    obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
    harsh_image = face_recognition.load_image_file(employee_face[1])
    harsh_face_encoding = face_recognition.face_encodings(harsh_image)[0]
    known_face_encodings = [
        obama_face_encoding,
        harsh_face_encoding
    ]
    known_face_names = [
        "Barack Obama",
        "harsh"
    ]
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame


        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    

        if(name != "Unknown" ):
            timee(name)
            exit()             # this will save the name into the database automatically
            break
            video_capture.release()
            cv2.destroyAllWindows()

    # start from here 
    # to recognise the face for timeout
    
    # import sqlite3
    # conn = sqlite3.connect('logfile.db')
    # cursor = conn.execute("SELECT DATE,NAME from names")
    # for row in cursor:
    #     if(row[0] == date and row[1] == name):
    #         timeout(name)

    
    video_capture.release()
    cv2.destroyAllWindows()
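timee() comes from the logfile module, which is not shown; a minimal sketch consistent with the commented-out query above, assuming a SQLite table names(DATE, NAME, TIME):

import sqlite3
import datetime

def timee(name):
    # Hypothetical: record the recognition date and time next to the name.
    conn = sqlite3.connect('logfile.db')
    conn.execute("CREATE TABLE IF NOT EXISTS names (DATE TEXT, NAME TEXT, TIME TEXT)")
    now = datetime.datetime.now()
    conn.execute("INSERT INTO names VALUES (?, ?, ?)",
                 (str(now.date()), name, str(now.time())))
    conn.commit()
    conn.close()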
Ejemplo n.º 42
0
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# detect the (x, y)-coordinates of the bounding box corresponding to
# each face in the input image and compute facial embeddings for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb, model = args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)

# initialize the list of names of detected faces
names = []

# loop over facial embeddings
for encoding in encodings:
    # compares each face in the input image to our known encodings
    matches = face_recognition.compare_faces(data["encodings"], encoding)
    name = "Unknown"

    # check whether a match was found
    if True in matches:
        #find the indexes of all matches and initialize a dictionary
        # to count number of times a match occur
        matchedIdxs = [i for (i, b) in enumerate(matches) if b]
        counts = {}

        # loop over matched indexes and maintain a count for each face
        for i in matchedIdxs:
            name = data["names"][i]
            counts[name] = counts.get(name, 0) + 1

        # Select the recognized face with the maximum number of matches
import cv2
import face_recognition

img1 = face_recognition.load_image_file('obama.jpg')
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img1Test = face_recognition.load_image_file('obama2.jpg')
img1Test = cv2.cvtColor(img1Test, cv2.COLOR_BGR2RGB)

face = face_recognition.face_locations(img1)[0]
print(face)
encodeFace = face_recognition.face_encodings(img1)[0]
print(encodeFace)
cv2.rectangle(img1, (face[3], face[0]), (face[1], face[2]), (255, 0, 255), 2)

faceTest = face_recognition.face_locations(img1Test)[0]
encodeTestFace = face_recognition.face_encodings(img1Test)[0]
cv2.rectangle(img1Test, (faceTest[3], faceTest[0]), (faceTest[1], faceTest[2]), (255, 0, 255), 2)

results = face_recognition.compare_faces([encodeFace], encodeTestFace)
faceDis = face_recognition.face_distance([encodeFace], encodeTestFace)
print(results, faceDis)
cv2.putText(img1Test, f'{results} {round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

cv2.imshow('Obama', img1)
cv2.imshow('Obama Test', img1Test)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ejemplo n.º 44
0
for _ in tqdm(range(50)):
    groups = []
    np.random.shuffle(faces)
    for face in faces:
        if len(groups) == 0:
            groups.append([face])
        else:
            group_this_face_belongs_to = None
            encoding_face_curr = get_face_encoding(face)

            for group_idx, group in enumerate(groups):
                face_group_repr = group[0]
                encoding_face_group_repr = get_face_encoding(face_group_repr)
                # encoding_face_group_repr = np.array([get_face_encoding(f) for f in group]).mean(0)

                if face_recognition.compare_faces([encoding_face_group_repr], encoding_face_curr, tolerance=0.65)[0]:
                    group_this_face_belongs_to = group_idx

            if group_this_face_belongs_to is not None:
                groups[group_this_face_belongs_to].append(face)
            else:
                groups.append([face])
    num_groups.append(len(groups))

num_people = int(np.mean(num_groups))


if False:
    faces = Face.objects.all()
    face_encodings = np.array([np.frombuffer(bytes.fromhex(f.encoding)) for f in faces])
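get_face_encoding() is not shown above; a minimal cached version, assuming each face object is hashable and exposes a path to a crop containing exactly one face (the image_path attribute is hypothetical):

from functools import lru_cache

@lru_cache(maxsize=None)
def get_face_encoding(face):
    # Cache per face so the repeated shuffling passes above stay cheap.
    image = face_recognition.load_image_file(face.image_path)
    return face_recognition.face_encodings(image)[0]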
# sets working directory to current directory
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

# make a list of all the available images
images = os.listdir('faces/')

# load your image
image_to_be_matched = face_recognition.load_image_file('2.jpg')

# encoded the loaded image into a feature vector
image_to_be_matched_encoded = face_recognition.face_encodings(
    image_to_be_matched)[0]

# iterate over each image
for image in images:
    # load the image
    current_image = face_recognition.load_image_file("faces/" + image)
    # encode the loaded image into a feature vector
    current_image_encoded = face_recognition.face_encodings(current_image)[0]
    # match your image with the image and check if it matches
    result = face_recognition.compare_faces([image_to_be_matched_encoded],
                                            current_image_encoded)
    # check if it was a match
    if result[0]:
        print("Matched: " + image)
    else:
        print("Not matched: " + image)
Ejemplo n.º 46
0
    total_face_encoding.append(
        face_recognition.face_encodings(
            face_recognition.load_image_file(path + "/" + fn))[0])
    fn = fn[:(len(fn) - 4)]  # strip the extension (images in the folder should be named after the person)
    total_image_name.append(fn)  # list of image names
while True:
    ret, frame = cap.read()
    # Find all the faces and face encodings in the video frame
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    # Loop over each face in this video frame
    for (top, right, bottom, left), face_encoding in zip(
            face_locations, face_encodings):
        # Check whether the face matches any known face
        for i, v in enumerate(total_face_encoding):
            match = face_recognition.compare_faces(
                [v], face_encoding, tolerance=0.5)
            name = "Unknown"
            if match[0]:
                name = total_image_name[i]
                break
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a labeled box with the name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
                      cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                    (255, 255, 255), 1)
    # Show the resulting image
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
def verify(v, we):
    import remove as f
    known_face_encodings = []
    known_face_names = []
    m = 1

    f.clean(v)
    t = None
    l = []
    r = []
    b = []
    u = []
    fonts = cv2.FONT_HERSHEY_SIMPLEX
    w = 0
    # org
    org = (50, 50)

    # fontScale
    fontScale = 1.3

    # Blue color in BGR
    color = (255, 0, 0)

    # Line thickness of 2 px
    thickness = 4

    s = []
    all_face_encodings = {}
    path1 = v + "/"
    h = ""
    d = []
    listing = os.listdir(path1)
    for file in listing:
        k = str(file)
        print(file)
        for p in k:
            if p == '.':
                break
            h = h + p

        d.append(h)
        h = ""
    print("Face Training and Encoding##", end="")
    for o in d:

        shah_image = face_recognition.load_image_file(v + "/" + str(o) +
                                                      ".jpg")
        np.save(v + "/" + str(o), shah_image)
        print("#", end="")

        shah_encoding = face_recognition.face_encodings(shah_image)[0]
        np.save(v + "/" + str(o) + "-en", shah_encoding)
        print("#", end="")
    for o in d:

        shah_image = np.load(v + "/" + str(o) + ".npy")
        all_face_encodings[str(o)] = face_recognition.face_encodings(
            shah_image)[0]
        print("#", end="")
    with open(v + "/" + 'dataset_sali1.dat', 'wb') as f:
        pickle.dump(all_face_encodings, f)
    print("Completed")
    print("\nComparison Starting [##", end="")
    for o in d:
        print("##", end="")
        shah_encoding = np.load(v + "/" + str(o) + "-en.npy")
        known_face_encodings.append(shah_encoding)
        known_face_names.append(str(o))
    test_image = face_recognition.load_image_file(we)
    face_locations = face_recognition.face_locations(test_image)
    face_encodings = face_recognition.face_encodings(test_image, face_locations)
    pil_image = Image.fromarray(test_image)
    draw = ImageDraw.Draw(pil_image)

    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):
        matches = face_recognition.compare_faces(known_face_encodings,
                                                 face_encoding)
        print("#", end="")
        name = "Unknown Person"
        if True in matches:
            print("#", end="")
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]
        draw.rectangle(((left + 2, top + 2), (right + 2, bottom + 2)),
                       outline=(0, 255, 255),
                       width=5)
        print("#", end="")
        text_width, text_height = draw.textsize(name)
        #draw.rectangle(((left,bottom-text_height-10),(right,bottom)),fill=(0,0,0),outline=(0,0,0),width=5)
        #draw.text((left+6,bottom-text_height-5),name,fill=(255,255,255,255))
        l.append(left)
        r.append(right)
        print("#", end="")
        u.append(text_height)
        b.append(bottom)
        #  print(name)
        s.append(name)
        print("#", end="")
        w = w + 1
    del draw
    import ex as et
    # print(s)
    ku = et.reads(s, v)
    print("##]Completed")
    pu = len(ku)
    if pu == 0:
        print("All are present")

    else:
        for ju in ku:
            print(ju)
        print("are absent")
    img = np.array(pil_image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # print(s)
    for t in s:
        print(".", end="")

    if t is not None:
        for z in range(0, w):
            # print(t)
            img = cv2.putText(img, s[z], (l[z] + 6, b[z] - u[z]), fonts,
                              fontScale, color, thickness, cv2.LINE_AA)

    # scale_percent = 60  # percent of original size
    # width = int(img.shape[1] * scale_percent / 100)
    # height = int(img.shape[0] * scale_percent / 100)
    # dim = (width, height)
    # img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

    cv2.imshow("test", img)
    print("press any key to continue")
    cv2.waitKey(0)
    cv2.destroyAllWindows()