Example #1
    def test_compare_faces(self):
        img_a1 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "obama.jpg"))
        img_a2 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "obama2.jpg"))
        img_a3 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "obama3.jpg"))

        img_b1 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "biden.jpg"))

        face_encoding_a1 = api.face_encodings(img_a1)[0]
        face_encoding_a2 = api.face_encodings(img_a2)[0]
        face_encoding_a3 = api.face_encodings(img_a3)[0]
        face_encoding_b1 = api.face_encodings(img_b1)[0]

        faces_to_compare = [
            face_encoding_a2, face_encoding_a3, face_encoding_b1
        ]

        match_results = api.compare_faces(faces_to_compare, face_encoding_a1)
        assert match_results[0]
        assert match_results[1]
        assert not match_results[2]
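For reference, compare_faces in the face_recognition library amounts to thresholding face_distance; a minimal sketch of the equivalent check, reusing the encodings from the example above:

import face_recognition.api as api

# Equivalent of api.compare_faces(faces_to_compare, face_encoding_a1):
# a known face matches when its Euclidean distance to the candidate
# encoding is at or below the tolerance (0.6 by default).
distances = api.face_distance(faces_to_compare, face_encoding_a1)
match_results = list(distances <= 0.6)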
Example #2
def test_image(image_to_check,
               known_names,
               known_face_encodings,
               tolerance=0.6):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings,
                                                unknown_encoding,
                                                tolerance=tolerance)

        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
Example #3
File: face.py Project: rebi14/graduation
def test_image(image_to_check,
               known_names,
               known_face_encodings,
               tolerance=0.6,
               show_distance=False):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_names = []
    for unknown_encoding in unknown_encodings:
        # compare_faces thresholds the face distances against the tolerance.
        match = face_recognition.compare_faces(known_face_encodings,
                                               unknown_encoding,
                                               tolerance=tolerance)
        name = "Unknown"
        for k in range(len(match)):
            if match[k]:
                name = known_names[k]
        face_names.append(name)
    if not unknown_encodings:
        # print out fact that no faces were found in image
        print(image_to_check, "no_persons_found", None, show_distance)

    return face_names
Example #4
def test_image_output_json(image_to_check, known_names, known_face_encodings):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    output = list()

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings,
                                                unknown_encoding)

        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    output.append({
                        'imagePath': image_to_check,
                        'name': name
                    })
        else:
            output.append({
                'imagePath': image_to_check,
                'name': 'unknown_name'
            })

    return output
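A possible caller that dumps the result as JSON; the probe path and the known-face data here are placeholders:

import json

# Hypothetical usage; known_names and known_face_encodings are assumed
# to have been built elsewhere from a folder of labeled images.
matches = test_image_output_json("probe.jpg", known_names, known_face_encodings)
print(json.dumps(matches, indent=2))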
Example #5
def test_image(image_to_check, known_names, known_face_encodings):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    print("unknown_encodings " + str(unknown_encodings))

    if len(unknown_encodings) == 1:
        for unknown_encoding in unknown_encodings:
            result = face_recognition.compare_faces(known_face_encodings,
                                                    unknown_encoding)
            distance = face_recognition.face_distance(known_face_encodings,
                                                      unknown_encoding)
            print(distance[0])
            print("True") if True in result else print("False ")

        return distance[0], result[0]
    else:
        return "0", "Many Faces or No Faces"
Example #6
def test_image(image_to_check, known_names, known_face_encodings):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings,
                                                unknown_encoding)

        output = ""
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    output += "{} ".format(name)

        print(output)
Example #7
    def test_compare_faces(self):
        img_a1 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images',
                         'obama.jpg'))
        img_a2 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images',
                         'obama2.jpg'))
        img_a3 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images',
                         'obama3.jpg'))

        img_b1 = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images',
                         'biden.jpg'))

        face_encoding_a1 = api.face_encodings(img_a1)[0]
        face_encoding_a2 = api.face_encodings(img_a2)[0]
        face_encoding_a3 = api.face_encodings(img_a3)[0]
        face_encoding_b1 = api.face_encodings(img_b1)[0]

        faces_to_compare = [
            face_encoding_a2, face_encoding_a3, face_encoding_b1
        ]

        match_results = api.compare_faces(faces_to_compare, face_encoding_a1)

        self.assertEqual(type(match_results), list)
        self.assertTrue(match_results[0])
        self.assertTrue(match_results[1])
        self.assertFalse(match_results[2])
Example #8
    def test_compare_faces_empty_lists(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
        face_encoding = api.face_encodings(img)[0]

        # empty python list
        faces_to_compare = []

        match_results = api.compare_faces(faces_to_compare, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])

        # empty numpy array
        faces_to_compare = np.array([])

        match_results = api.compare_faces(faces_to_compare, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])
Example #9
    def test_compare_faces_empty_lists(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
        face_encoding = api.face_encodings(img)[0]

        # empty python list
        faces_to_compare = []

        match_results = api.compare_faces(faces_to_compare, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])

        # empty numpy array
        faces_to_compare = np.array([])

        match_results = api.compare_faces(faces_to_compare, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])
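The empty-input behavior asserted above follows from face_distance, which returns an empty array when no known encodings are given, so the thresholded match list is empty too. A sketch, reusing face_encoding from the test:

import face_recognition.api as api

# No known encodings -> empty distance array -> empty match list.
distances = api.face_distance([], face_encoding)
assert distances.shape == (0,)
assert api.compare_faces([], face_encoding) == []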
Example #10
def test():
    # loop over unknown faces
    print("processing unknown faces")
    for filename in os.listdir(UNKNOWN_FACES_DIR):
        # load the image
        print(f"Filename {filename}", end="")
        image = FR.load_image_file(f"{UNKNOWN_FACES_DIR}/{filename}")

        # Find the location of the faces
        locations = FR.face_locations(image, model=MODEL)

        # pass locations to face_encodings to cut down on processing time
        encodings = FR.face_encodings(image, locations)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        print(f",found {len(encodings)} face(s)")

        for face_encoding, face_location in zip(encodings, locations):
            results = FR.compare_faces(known_faces, face_encoding, TOLERENCE)
            match = None

            if True in results:
                match = known_names[results.index(True)]
                print(f"Match Found {match}")
                # dimensions of where the face is
                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2])

                # draw rectangle on image
                color = [0, 255, 0]
                cv2.rectangle(image, top_left, bottom_right, color, FRAME_THICKNESS)

                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2] + 22)
                # cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
                cv2.putText(
                    image,
                    match,
                    (face_location[3] + 10, face_location[2] + 15),
                    cv2.FONT_HERSHEY_COMPLEX,
                    0.5,
                    (200, 200, 200),
                    FONT_THICKNESS,
                )

        cv2.imshow(filename, image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #11
    def test_face_matching(self):
        ic_path = os.path.join(test_data_path, 'my ic.jpg')
        driving_license_path = os.path.join(test_data_path,
                                            'my driving license.jpg')

        image_ic = api.load_image_file(ic_path)
        image_driving_license = api.load_image_file(driving_license_path)

        face_encoding_ic = api.face_encodings(image_ic)[0]
        face_encoding_driving = api.face_encodings(image_driving_license)[0]

        self.tolerance = 0.50
        match_results = api.compare_faces([face_encoding_ic],
                                          face_encoding_driving,
                                          tolerance=self.tolerance)

        self.assertEqual(type(match_results), list)
        self.assertTrue(match_results[0])
Example #12
def test_image(image_to_check, known_names, known_face_encodings):
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)

        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
Example #13
File: facerec1.py Project: nerdogan/nenra
    def test_compare_faces(self):
        img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg"))
        img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama2.jpg"))
        img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama3.jpg"))

        img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "biden.jpg"))

        face_encoding_a1 = api.face_encodings(img_a1)[0]
        face_encoding_a2 = api.face_encodings(img_a2)[0]
        face_encoding_a3 = api.face_encodings(img_a3)[0]
        face_encoding_b1 = api.face_encodings(img_b1)[0]

        faces_to_compare = [
            face_encoding_a2,
            face_encoding_a3,
            face_encoding_b1
        ]

        match_results = api.compare_faces(faces_to_compare, face_encoding_a1)
        assert match_results[0]
        assert match_results[1]
        assert not match_results[2]
Example #14
    def test_compare_faces(self):
        img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama2.jpg'))
        img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama3.jpg'))

        img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))

        face_encoding_a1 = api.face_encodings(img_a1)[0]
        face_encoding_a2 = api.face_encodings(img_a2)[0]
        face_encoding_a3 = api.face_encodings(img_a3)[0]
        face_encoding_b1 = api.face_encodings(img_b1)[0]

        faces_to_compare = [
            face_encoding_a2,
            face_encoding_a3,
            face_encoding_b1]

        match_results = api.compare_faces(faces_to_compare, face_encoding_a1)

        self.assertEqual(type(match_results), list)
        self.assertTrue(match_results[0])
        self.assertTrue(match_results[1])
        self.assertFalse(match_results[2])
Example #15
def main():
    vs = VideoStream()
    vs.start()
    names, known_encodings = load_known_faces('./faces/known_faces')
    print(len(known_encodings))
    while vs.isOpened():
        image = vs.read()

        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # unused below
        face_locations = fr.face_locations(image, model='hog')
        img_face_encodings = fr.face_encodings(image, face_locations)
        match_matrix = [
            fr.compare_faces(known_encodings, f, tolerance=0.6)
            for f in img_face_encodings
        ]
        print(match_matrix)
        img_with_faces = draw_bbox_on_img(image, face_locations)

        cv2.imshow('frame', img_with_faces)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    vs.close()
    cv2.destroyAllWindows()
Example #16
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding,
                                                     tolerance=0.35)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_ids[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(
                known_face_encodings, face_encoding)
            # print(face_distances)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_ids[best_match_index]
    print("faces loaded!")
    print("starting camera")
    # record
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cap.set(cv2.CAP_PROP_FPS, 30)

    while True:
        ret, frame = cap.read()
        frame = np.array(frame)

        locations = FR.face_locations(frame, model=MODEL)
        encodings = FR.face_encodings(frame, locations)
        print(f"found {len(encodings)} face(s)")

        for face_encoding, face_location in zip(encodings, locations):
            results = FR.compare_faces(known_faces, face_encoding, TOLERENCE)
            match = None
            if True in results:
                match = known_names[results.index(True)]
                print(f"Match Found {match}")
                # dimensions of where the face is
                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2])

                # draw rectangle on image
                color = [0, 255, 0]
                cv2.rectangle(frame, top_left, bottom_right, color, FRAME_THICKNESS)

                top_left = (face_location[3], face_location[0])
                bottom_right = (face_location[1], face_location[2] + 22)
                # cv2.rectangle(image, top_left, bottom_right, color, cv2.FILLED)
Example #18
def mark_your_attendance():

    mpl.rcParams['toolbar'] = 'None'
    STORAGE_PATH = "/home/harsh/face-recognition-attendance-system/storage"

    try:
        with open(os.path.join(STORAGE_PATH, "known_face_ids.pickle"),
                  "rb") as fp:
            known_face_ids = pickle.load(fp)
        with open(os.path.join(STORAGE_PATH, "known_face_encodings.pickle"),
                  "rb") as fp:
            known_face_encodings = pickle.load(fp)
        # known_face_ids = np.load("known_face_ids.npy")
        # known_face_encodings = np.load("known_face_encodings.npy")
    except Exception:
        # Fall back to empty lists if the pickle files are missing or unreadable.
        known_face_encodings = []
        known_face_ids = []

    # CSV_PATH = "/home/harsh/Backup/face-recognition/data/attendance.csv"
    CSV_PATH = "/home/harsh/face-recognition-attendance-system/static/data/attendance.csv"

    if os.path.exists(CSV_PATH):
        csv_file = open(CSV_PATH, "a+")
        writer = csv.writer(csv_file)

    else:
        os.mknod(CSV_PATH)
        csv_file = open(CSV_PATH, "w+")
        writer = csv.writer(csv_file)
        writer.writerow(["Student ID", "Date", "Time of Entry"])

    name = "Unknown"
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    sanity_count = 0
    unknown_count = 0
    marked = True

    video_capture = cv2.VideoCapture(0)
    ret, frame = video_capture.read()

    plot = plt.subplot(1, 1, 1)
    plt.title("Detecting Face")
    plt.axis('off')
    im1 = plot.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # print("FRAME READ WORKS")
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings,
                                                         face_encoding,
                                                         tolerance=0.35)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_ids[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                # print(face_distances)
                try:
                    best_match_index = np.argmin(face_distances)
                except ValueError:  # np.argmin raises on an empty array
                    # print("No students have been marked")
                    video_capture.release()
                    cv2.destroyAllWindows()
                    marked = False
                    return marked
                if matches[best_match_index]:
                    name = known_face_ids[best_match_index]

                face_names.append(name)

        if (name == "Unknown"):
            unknown_count += 1
        else:
            unknown_count = 0

        if unknown_count == 600:
            # video_capture.release()
            # cv2.destroyAllWindows()
            # print("You haven't been registered")
            marked = False
            unknown_count = 0
            break

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.7,
                        (255, 255, 255), 1)

        # print("BEFORE sHOWING")
        # Display the resulting image
        # cv2.imshow('Video', frame)
        # if cv2.waitKey(20) == 27:
        #     break

        plt.ion()
        im1.set_data(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        plt.pause(0.001)
        # as opencv loads in BGR format by default, we want to show it in RGB.
        plt.show()

        # print("AFTER SHOWING")
        # Hit 'q' on the keyboard to quit!
        if sanity_count == 0:
            prev_name = name
            sanity_count += 1

        elif sanity_count < 60:
            if prev_name == name and name != "Unknown":
                sanity_count += 1
                prev_name = name
            else:
                sanity_count = 0

        elif sanity_count == 60:
            # print("Face registered")
            # video_capture.release()
            # cv2.destroyAllWindows()
            sanity_count = 0
            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            date = dt_string.split(" ")[0]
            time = dt_string.split(" ")[1]
            writer.writerow([name, date, time])
            # print(name + date + time)
            break

    # Release handle to the webcam

    plt.close()
    video_capture.release()
    cv2.destroyAllWindows()

    return marked
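One caveat on the small_frame[:, :, ::-1] conversion used above: the reversed slice produces a non-contiguous view, which newer dlib/face_recognition builds can reject with an unsupported-image-type error. A contiguous conversion is safer; a minimal sketch:

import cv2

# cv2.cvtColor returns a contiguous RGB copy of the BGR frame;
# np.ascontiguousarray(small_frame[:, :, ::-1]) works as well.
rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)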
Example #19
def mark_your_attendance(location, known_face_encodings, known_face_ids):

    mpl.rcParams['toolbar'] = 'None'

    if (os.path.exists(DB_PATH)):
        #rdbms='sqlite'
        #conn = psycopg2.connect(DB_PATH)
        #c=conn.cursor()
        rdbms = 'postgresql'
        conn = psycopg2.connect(host="localhost",
                                database="face_rec_db",
                                user="******",
                                password="******")
        c = conn.cursor()
    else:
        #os.mknod(DB_PATH)
        rdbms = 'sqlite'
        conn = sqlite3.connect(DB_PATH)
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS ATTENDANCE
         (ID        TEXT   NOT NULL,
         TIMESTAMP  TEXT       NOT NULL,
         LOCATION  TEXT);''')
        conn.commit()

    name = "Unknown"
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    sanity_count = 0
    unknown_count = 0
    marked = True

    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    #_,frame = video_capture.read()

    while True:
        # Grab a single frame of video
        _, frame = video_capture.read()

        #Applying face enhancement steps
        frame = imageEnhancement.adjust_gamma(frame, gamma=1.5)

        # Resize frame of video to 1/4 size for faster face recognition processing
        #small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        small_frame = frame
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings,
                                                         face_encoding,
                                                         tolerance=0.35)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_ids[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                # print(face_distances)
                try:
                    best_match_index = np.argmin(face_distances)
                except ValueError:  # np.argmin raises on an empty array
                    # print("No students have been marked")
                    video_capture.release()
                    cv2.destroyAllWindows()
                    marked = False
                    return marked
                if matches[best_match_index]:
                    name = known_face_ids[best_match_index]

                face_names.append(name)

        if name == "Unknown":
            unknown_count += 1
        else:
            unknown_count = 0

        if unknown_count == 600:
            # video_capture.release()
            # cv2.destroyAllWindows()
            # print("You haven't been registered")
            marked = False
            unknown_count = 0
            break

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            #top *= 4
            #right *= 4
            #bottom *= 4
            #left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.7,
                        (255, 255, 255), 1)

        # print("AFTER SHOWING")
        # Hit 'q' on the keyboard to quit!
        if sanity_count == 0:
            prev_name = name
            sanity_count += 1

        elif sanity_count < 60:
            if prev_name == name and name != "Unknown":
                sanity_count += 1
                prev_name = name
            else:
                sanity_count = 0

        elif sanity_count == 60:
            # print("Face registered")
            # video_capture.release()
            # cv2.destroyAllWindows()
            sanity_count = 0
            # now = datetime.now()
            # if(entry_or_exit==0):
            #     c.execute("INSERT INTO ATTENDANCE VALUES (?,datetime('now'),'IN');",(name, ))
            # else:
            #     c.execute("INSERT INTO ATTENDANCE VALUES (?,datetime('now'),'OUT');",(name, ))

            if rdbms == 'sqlite':
                c.execute(
                    "INSERT INTO ATTENDANCE VALUES (?,datetime('now'),?);", (
                        name,
                        location,
                    ))
            elif rdbms == 'postgresql':
                c.execute("INSERT INTO attendance VALUES (%s,now(),%s);",
                          (name, location))
            conn.commit()

            break

        #OpenCV's implementation to show an image in window(doesn't work on production server)
        #cv2.imshow("Marking Attendance (PRESS Q TO QUIT)",frame)

        #Encoding the frame to be stream into browser
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

        #if cv2.waitKey(20) == ord("q"):
        #    break

    # Release handle to the webcam

    #plt.close()
    video_capture.release()
    cv2.destroyAllWindows()
    conn.close()

    return marked
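The multipart --frame chunks yielded above follow the usual motion-JPEG streaming pattern; a sketch of a Flask route that could consume this generator, assuming the known-face data is already loaded (all names here are hypothetical):

from flask import Flask, Response

app = Flask(__name__)

@app.route("/attendance_feed")
def attendance_feed():
    # mark_your_attendance yields one multipart JPEG chunk per frame.
    return Response(
        mark_your_attendance("main-gate", known_face_encodings, known_face_ids),
        mimetype="multipart/x-mixed-replace; boundary=frame")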
Example #20
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(
            rgb_small_frame, number_of_times_to_upsample=2)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(x_train,
                                                     face_encoding,
                                                     tolerance=0.5)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = y_lables[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
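This snippet takes the first True in matches; several snippets above prefer the known face with the smallest distance instead. A sketch of that variant, reusing x_train, y_lables, matches, and face_encoding from this example:

import numpy as np
import face_recognition

# Best-match variant: pick the closest known encoding, then confirm
# it still falls within the tolerance via the matches list.
face_distances = face_recognition.face_distance(x_train, face_encoding)
best_match_index = int(np.argmin(face_distances))
if matches[best_match_index]:
    name = y_lables[best_match_index]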
Example #21
def analyseFootage(clipname):
    CLIP_PATH = FOOTAGES_PATH + "/" + clipname

    if not os.path.isfile(CLIP_PATH):
        return 0

    #Load the known face IDs and encodings for facial recognition
    try:
        with open(os.path.join(STORAGE_PATH, "known_face_ids.pickle"),
                  "rb") as fp:
            known_face_ids = pickle.load(fp)
        with open(os.path.join(STORAGE_PATH, "known_face_encodings.pickle"),
                  "rb") as fp:
            known_face_encodings = pickle.load(fp)
    except Exception:
        # Fall back to empty lists if the pickle files are missing or unreadable.
        known_face_encodings = []
        known_face_ids = []

    #Start the Video Stream
    fvs = FileVideoStream(CLIP_PATH).start()
    time.sleep(1.0)

    print("[INFO] Loading the facial detector")
    detector = dlib.get_frontal_face_detector()
    #predictor = dlib.shape_predictor(LANDMARK_PATH)
    #fa = FaceAligner(predictor, desiredFaceWidth = 96)
    name = "Unknown"
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    #sanity_count = 0
    unknown_count = 0
    marked = True

    print("[INFO] Initializing CCTV Footage")
    while fvs.more():
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale (while still retaining 3
        # channels)
        frame = fvs.read()

        if frame is None:
            break

        frame = imutils.resize(frame, width=600)

        frame = adjust_gamma(frame, gamma=1.5)
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect all faces in the current frame; the second argument is the
        # number of times to upsample the image before detection.
        faces = detector(gray_frame, 0)
        # 'faces' may contain multiple detections; draw a rectangle around each one.

        #sampleNum = sampleNum+1
        for face in faces:
            #num_frames = num_frames + 1
            #print("inside for loop")

            if face is None:
                print("face is none")
                continue

            #face_aligned = fa.align(frame,gray_frame,face)
            #face_aligned = imutils.resize(face_aligned ,width = 600)

            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(frame)
                face_encodings = face_recognition.face_encodings(
                    frame, face_locations)

                face_names = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(
                        known_face_encodings, face_encoding, tolerance=0.35)
                    name = "Unknown"

                    # # If a match was found in known_face_encodings, just use the first one.
                    # if True in matches:
                    #     first_match_index = matches.index(True)
                    #     name = known_face_ids[first_match_index]

                    # Or instead, use the known face with the smallest distance to the new face
                    face_distances = face_recognition.face_distance(
                        known_face_encodings, face_encoding)
                    # print(face_distances)
                    try:
                        best_match_index = np.argmin(face_distances)
                        if matches[best_match_index]:
                            name = known_face_ids[best_match_index]
                    except ValueError:  # np.argmin raises on an empty array
                        # print("No students have been marked")
                        #video_capture.release()
                        #cv2.destroyAllWindows()

                        marked = False
                        #return marked
                    #if matches[best_match_index]:
                    #    name = known_face_ids[best_match_index]

                    face_names.append(name)

            if name == "Unknown":
                unknown_count += 1
            else:
                unknown_count = 0

            if unknown_count == 600:
                # video_capture.release()
                # cv2.destroyAllWindows()
                # print("You haven't been registered")
                marked = False
                unknown_count = 0
                break

            process_this_frame = not process_this_frame

            for (top, right, bottom,
                 left), name in zip(face_locations, face_names):

                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255),
                              1)

                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom + 15), (right, bottom),
                              (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom + 15), font, 0.4,
                            (255, 255, 255), 1)

        #Showing the image in another window
        #Creates a window with window name "Face" and with the image img
        #cv2.imshow("Video feed (PRESS Q TO QUIT",frame)
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

        #if cv2.waitKey(1) == ord("q") :
        #    break

    print("here")
    # do a bit of cleanup
    cv2.destroyAllWindows()
    #fvs.stop()
    return
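A note on the rectangle math used in this and several snippets above: face_locations returns (top, right, bottom, left) tuples, while OpenCV's drawing functions expect (x, y) corner points, which is why the indices are shuffled:

# face_location is (top, right, bottom, left); OpenCV points are (x, y).
top, right, bottom, left = face_location
top_left = (left, top)          # i.e. (face_location[3], face_location[0])
bottom_right = (right, bottom)  # i.e. (face_location[1], face_location[2])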
Example #22
import face_recognition.api as face_recognition

known_image = face_recognition.load_image_file(
    "face_recognition_github/examples/biden.jpg")
unknown_image = face_recognition.load_image_file(
    "face_recognition_github/tests/test_images/obama3.jpg")

biden_encoding = face_recognition.face_encodings(known_image)[0]
unknown_encoding = face_recognition.face_encodings(unknown_image)[0]

results = face_recognition.compare_faces([biden_encoding], unknown_encoding)

loc = face_recognition.face_locations(unknown_image)

if loc:
    print("good")
else:
    print("step in the frame")