Example #1
 def check(self, detected_face):
     # We could use the detected landmarks directly, but that is not implemented yet. TODO: the copy/paste below should help.
     encodings = face_recognition.face_encodings(detected_face.image)
     if encodings is not None and len(encodings) > 0:
         distances = list(face_recognition.face_distance(self.encodings, encodings[0]))
         distance = sum(distances) / len(distances)
         mindistance = min(distances)
         maxdistance = max(distances)
         if distance > self.threshold:
             print("Distance above threshold: %f > %f" % (distance, self.threshold))
             return False
         if len(self.nencodings) > 0:
             ndistances = list(face_recognition.face_distance(self.nencodings, encodings[0]))
             ndistance = sum(ndistances) / len(ndistances)
             nmindistance = min(ndistances)
             nmaxdistance = max(ndistances)
             if mindistance > nmindistance:
                 print("Distance to negative sample is smaller")
                 return False
             if distance > ndistance:
                 print("Average distance to negative sample is smaller")
                 return False
             # k-nn classifier: vote among the k nearest samples
             # (label 1 = positive sample, label 0 = negative sample)
             k = min(5, min(len(distances), len(ndistances)) + 1)
             nearest = sorted([(1, d) for d in distances] + [(0, d) for d in ndistances],
                              key=lambda x: x[1])[:k]
             ratio = sum(label for label, _ in nearest) / k
             if ratio < 0.5:
                 print("K-nn is %.2f" % ratio)
                 return False
         return True
     else:
         print("No face encodings found")
         return False
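The same k-nn vote can also be written with heapq.nsmallest, which avoids sorting the full list; a minimal equivalent sketch, assuming distances and ndistances are plain lists of floats (1 labels a positive sample, 0 a negative one):

import heapq

def knn_positive_ratio(distances, ndistances, k=5):
    labelled = [(d, 1) for d in distances] + [(d, 0) for d in ndistances]
    k = min(k, len(labelled))
    nearest = heapq.nsmallest(k, labelled)           # the k smallest distances
    return sum(label for _, label in nearest) / k    # fraction of positive neighbours

Example #2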
 def get_avg_score_faces(f1encs, references):
     import_face_recognition()
     scores = []
     for f2encs in references:
         score = face_recognition.face_distance(f1encs, f2encs)[0]
         scores.append(score)
     return sum(scores) / len(scores)
Example #3
    def sort_face(self):
        input_dir = self.args.input_dir

        print("Sorting by face similarity...")

        img_list = [[x, face_recognition.face_encodings(cv2.imread(x))]
                    for x in
                    tqdm(self.find_images(input_dir),
                         desc="Loading",
                         file=sys.stdout)]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len - 1),
                      desc="Sorting",
                      file=sys.stdout):
            min_score = float("inf")
            j_min_score = i + 1
            for j in range(i + 1, len(img_list)):
                f1encs = img_list[i][1]
                f2encs = img_list[j][1]
                if f1encs is not None and f2encs is not None and len(
                        f1encs) > 0 and len(f2encs) > 0:
                    score = face_recognition.face_distance(f1encs[0],
                                                           f2encs)[0]
                else:
                    score = float("inf")

                if score < min_score:
                    min_score = score
                    j_min_score = j
            img_list[i + 1], img_list[j_min_score] = (img_list[j_min_score],
                                                      img_list[i + 1])

        return img_list
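The greedy nearest-neighbour sort above calls face_distance inside an O(n^2) Python loop; the same distances can be precomputed in one vectorized step. A sketch, assuming each image kept exactly one 128-d encoding and the matrix fits in memory:

import numpy as np

def pairwise_distances(encodings):
    """Full NxN Euclidean distance matrix for a list of 128-d encodings."""
    encs = np.array(encodings)                   # shape (N, 128)
    diff = encs[:, None, :] - encs[None, :, :]   # shape (N, N, 128)
    return np.linalg.norm(diff, axis=-1)         # shape (N, N)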
Example #4
    def sort_face_dissim(self):
        input_dir = self.args.input_dir

        print("Sorting by face dissimilarity...")

        img_list = [[x, face_recognition.face_encodings(cv2.imread(x)), 0]
                    for x in
                    tqdm(self.find_images(input_dir),
                         desc="Loading",
                         file=sys.stdout)]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len), desc="Sorting", file=sys.stdout):
            score_total = 0
            for j in range(0, img_list_len):
                if i == j:
                    continue
                try:
                    # each img_list entry stores the list of encodings found
                    # in that image; compare the first encoding of each
                    score_total += face_recognition.face_distance(
                        [img_list[i][1][0]],
                        img_list[j][1][0])[0]
                except IndexError:
                    # no face encoding was found in one of the images
                    pass

            img_list[i][2] = score_total

        print("Sorting...")
        img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)
        return img_list
Example #5
 def get_avg_score_faces(f1encs, references):
     """ Return the average similarity score between a face and
         reference image """
     scores = []
     for f2encs in references:
         score = face_recognition.face_distance(f1encs, f2encs)[0]
         scores.append(score)
     return sum(scores) / len(scores)
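A usage sketch for get_avg_score_faces, assuming each image contains exactly one face (the file names are hypothetical):

import face_recognition

probe_encs = face_recognition.face_encodings(
    face_recognition.load_image_file("probe.jpg"))
references = [face_recognition.face_encodings(face_recognition.load_image_file(f))
              for f in ("ref_a.jpg", "ref_b.jpg")]
print(get_avg_score_faces(probe_encs, references))  # lower average = more similar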
Example #6
 def check(self, detected_face):
     encodings = face_recognition.face_encodings(detected_face.image)  # we could use the detected landmarks directly, but that is not implemented yet (TODO: the copy/paste below should help)
     if encodings is not None and len(encodings) > 0:
         score = face_recognition.face_distance([self.encoding], encodings[0])[0]
         print(score)
         return score <= self.threshold
     else:
         print("No face encodings found")
         return False
Example #7
def test_image(image_to_check, tolerance=0.6):
    recognized_faces = []

    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    unknown_image = scale_image(unknown_image)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_landmarks_list = face_recognition.face_landmarks(unknown_image)
    face_locations = face_recognition.face_locations(unknown_image)

    pil_image = Image.fromarray(unknown_image)
    d = ImageDraw.Draw(pil_image)

    if not unknown_encodings:
        # report that no faces were found in the image
        print_result(image_to_check, "no_persons_found", None)

    else:
        for unknown_encoding, face_landmarks, face_location in zip(unknown_encodings, face_landmarks_list,
                                                                   face_locations):
            distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)

            for distance, name in zip(distances, known_names):
                if distance <= tolerance:
                    print_result(image_to_check, name, distance)
                    recognized_faces.append(
                        {'name': name, 'dist': distance, 'landmarks': face_landmarks, 'face_location': face_location}
                    )
                else:
                    print_result(image_to_check, "unknown_person", None)

        for item in recognized_faces:
            face_landmarks = item['landmarks']
            face_location = item['face_location']
            # Print the location of each facial feature in this image
            # Let's trace out each facial feature in the image with a line!
            for facial_feature in face_landmarks.keys():
                print("The {} in this face has the following points: {}".format(facial_feature,
                                                                                face_landmarks[facial_feature]))
                d.line(face_landmarks[facial_feature], width=3)

            # Print the location of each face in this image
            top, right, bottom, left = face_location
            print(
                "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom,
                                                                                                      right))
            d.rectangle(((left, top), (right, bottom)), outline=4)
            font = ImageFont.truetype("font/arial.ttf", size=30)
            title = item['name']
            text_size = d.textsize(title, font)
            d.text((left, bottom - text_size[1]), title, font=font, fill='white')

    pil_image.save("data/recognition_results/result.jpg")

    return recognized_faces
Example #8
def choose_best(faces, tmp_dir):

    if len(faces) == 0:
        return ""

    # we need at least 3 faces to do any serious job
    if len(faces) < 3:
        return faces[0]

    encoded_faces = {}

    # encode all faces for speedup
    for file in faces:

        filename = os.path.basename(file)
        enc_file_name = tmp_dir + "/" + filename + ".enc"

        try:
            encoded = _utils.get_encoded(file, enc_file_name)

        except _utils.FaceNotFoundError:
            continue

        encoded_faces[file] = encoded

    fresults = {}

    # check each one with each one
    for file, face_data in encoded_faces.items():

        total_distance = 0
        count = 0

        for file2, face_data2 in encoded_faces.items():
            if file == file2:
                continue

            results = face_recognition.face_distance([face_data], face_data2)
            total_distance += results[0]
            count += 1

        if count > 0:
            avg_distance = total_distance / count
            fresults[file] = avg_distance

    # pick the photo with the largest average distance to the other photos
    print(fresults)

    if len(fresults) > 0:
        best = max(fresults.items(), key=operator.itemgetter(1))

        return best[0]
    else:
        return ""
Example #9
def get_face(img, target_encodings):
    img = np.array(img)
    locations = face_recognition.face_locations(img, model="cnn")
    if len(locations) == 0:
        return None, None, None, None, None
    encodings = face_recognition.face_encodings(img, locations)
    landmarks = face_recognition.face_landmarks(img, locations)
    if target_encodings is not None:
        distances = [face_recognition.face_distance([target_encodings], encoding)[0]
                     for encoding in encodings]
        idx_closest = distances.index(min(distances))
        target_face, target_landmarks = locations[idx_closest], landmarks[idx_closest]
    else:
        target_face, target_landmarks = locations[0], landmarks[0]
    top, right, bottom, left = target_face
    x, y, w, h = left, top, right-left, bottom-top
    return x, y, w, h, target_landmarks
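A usage sketch for get_face (the input file is hypothetical), cropping the located face out of the frame with the returned x, y, w, h values:

import numpy as np
from PIL import Image

img = Image.open("frame.jpg")
x, y, w, h, landmarks = get_face(img, target_encodings=None)
if x is not None:
    face_crop = np.array(img)[y:y + h, x:x + w]  # rows are top:bottom, cols are left:right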
Example #10
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_frame = frame[:, :, ::-1]

    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"

        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]

        face_names.append(name)

    # Label the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        if not name:
            continue

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
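Example #11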
def f():
    cam = cv2.VideoCapture(0)

    known_faces = get_encoded_faces()
    face = list(known_faces.values())
    names = list(known_faces.keys())

    process_frame = True

    face_locations = []
    face_encodings = []

    while True:
        ret, frame = cam.read()

        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        rgb_small_frame = small_frame[:, :, ::-1]  # conversion from BGR to RGB

        if process_frame:
            face_locations = fr.face_locations(rgb_small_frame)
            face_encodings = fr.face_encodings(rgb_small_frame, face_locations)
            face_names = []

            for face_encoding in face_encodings:
                matches = fr.compare_faces(face, face_encoding)
                name = "Unknown"

                face_distances = fr.face_distance(face, face_encoding)
                best_match_index = np.argmin(face_distances)

                if matches[best_match_index]:
                    name = names[best_match_index]

                face_names.append(name)

        process_frame = not process_frame

        for (y1, x2, y2, x1), name in zip(face_locations, face_names):
            x1 *= 4
            x2 *= 4
            y1 *= 4
            y2 *= 4

            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

            cv2.rectangle(frame, (x1, y2 - 30), (x2, y2), (0, 255, 0),
                          cv2.FILLED)

            font = cv2.FONT_HERSHEY_DUPLEX

            cv2.putText(frame, name, (x1 + 6, y2 - 6), font, 1.0,
                        (255, 255, 255), 1)

        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cam.release()
    cv2.destroyAllWindows()
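Detection in these webcam snippets runs on a 1/4-scale frame, so the returned coordinates are multiplied by 4 before drawing on the full frame; a small helper (hypothetical name) capturing that scale-back:

def scale_location(location, factor=4):
    """Scale a (top, right, bottom, left) tuple back to full-frame pixels."""
    top, right, bottom, left = location
    return top * factor, right * factor, bottom * factor, left * factor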
Example #12
    faces_in_image = fr.face_encodings(as_numpy_arr)
    encoding_generation_times.append(pc() - t0)
    print(f"\tFound {len(faces_in_image)} faces")

    # Determine expected results
    expected_people: List[KnownPerson] = [
        x for x in KnownPeople if x.Name in file
    ]

    # Track actual results
    found_people: List[KnownPerson] = []

    # Actual facial recognition logic
    for face in faces_in_image:
        t1 = pc()
        compare_results = fr.face_distance([x.Encoding for x in KnownPeople],
                                           face)
        face_compare_times.append(pc() - t1)

        # Filter list step by step. I broke this into multiple lines for debugging.
        possible_matches = [
            ComparedFace(KnownPeople[i], compare_results[i])
            for i in range(len(compare_results))
        ]

        # Remove faces that are too unlike the face we're checking.
        possible_matches: List[ComparedFace] = [
            x for x in possible_matches if x.Distance < TOLERANCE
        ]
        #print(f"\tBy tolerance, {len(possible_matches)} likely matches: ", ', '.join([str(x) for x in possible_matches]))

        # Removing already-found people
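The fragment above assumes the record types KnownPerson and ComparedFace (and a TOLERANCE constant) defined elsewhere; a minimal sketch of what they might look like:

from dataclasses import dataclass
import numpy as np

TOLERANCE = 0.6  # typical face_recognition cutoff

@dataclass
class KnownPerson:
    Name: str
    Encoding: np.ndarray  # 128-d face encoding

@dataclass
class ComparedFace:
    Person: KnownPerson
    Distance: float

    def __str__(self):
        return f"{self.Person.Name} ({self.Distance:.2f})"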
Example #13
cap = cv2.VideoCapture(0)
success, imgUser = cap.read()
imgUser = cv2.cvtColor(imgUser, cv2.COLOR_BGR2RGB)
encodeUser = face_recognition.face_encodings(imgUser)

cap = cv2.VideoCapture(0)
while True:
    success, img = cap.read()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeUser, encodeFace)
        faceDist = face_recognition.face_distance(encodeUser, encodeFace)
        print(faceDist)
        matchIndex = np.argmin(faceDist)
        if matches[matchIndex]:
            name = "User"
            print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX,
                        1, (255, 255, 255), 2)
        else:
            name = "Not User"
            print(name)
Example #14
def face_distance(encoding1, encoding2):
    face_distances = face_recognition.face_distance([encoding1], encoding2)
    return face_distances
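Example #15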
def analyze_faces(args):

    # Initialize the variables for operation.
    known_face_encodings = []
    known_face_names = []
    is_video = True
    face_names = []
    process_this_frame = True

    faces_path = Path(args["registered_faces_dir"])

    # Check whether the registered_faces_dir exist
    if not os.path.exists(faces_path):
        print("Invalid registered_faces_dir")
        return

    # Check whether it is an image input or video stream.
    if args["recognize_image"]:
        recognize_image = Path(args["recognize_image"])
        is_video = False

    # Get all the registered faces.
    face_files = paths.list_images(faces_path)

    # Read image and encode it.
    for face_file in face_files:
        print("Process the file :{}".format(face_file))
        name = face_file.split(os.path.sep)[-1].split('.')[-2]
        image = face_recognition.load_image_file(face_file)
        known_face_names.append(name)
        known_face_encodings.append(face_recognition.face_encodings(image)[0])

    # Open the webcam once (index 0); change the index as required.
    if is_video:
        video_capture = cv2.VideoCapture(0)

    # Loop until the user terminates
    while True:

        if is_video:
            # Grab a single frame of video
            _, frame = video_capture.read()
        else:
            frame = cv2.imread(str(recognize_image))

        if frame is None:
            print("Invalid Image.")
            break

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                # Tune the tolerance parameters as desired
                # Based on the tolerance ratio, we check whether the input
                # face is similar to which registered faces.
                matches = face_recognition.compare_faces(known_face_encodings,
                                                         face_encoding,
                                                         tolerance=0.5)
                name = "UNKNOWN"

                # Find the face that look the most similar to the registered faces.
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                similar_rate = (1 - face_distances[best_match_index]) * 100

                # Check if the detected face is a known face
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append((name, similar_rate))

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom,
             left), (name, similar_rate) in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            font = cv2.FONT_HERSHEY_DUPLEX
            if name == "UNKNOWN":
                color = (0, 0, 255)
            else:
                color = (0, 255, 0)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                              color, cv2.FILLED)
                # Draw the text on the frame
                cv2.putText(frame, "{:.2f}%".format(similar_rate),
                            (left + 6, bottom - 6), font, 1.0, (255, 255, 255),
                            1)

            cv2.putText(frame, name, (left + 6, top - 6), font, 1.0,
                        (255, 255, 0), 1)

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

        # Display the resulting image
        cv2.imshow('Video', frame)

        if not is_video:
            # Wait for any key to be pressed.
            cv2.waitKey(0)

            # Save the detected image to the same directory with suffix _detected
            new_filename = recognize_image.stem + "_detected.jpg"
            new_filepath = recognize_image.parents[0]
            cv2.imwrite(str(new_filepath / new_filename), frame)

            # Exit the loop as it is not a video stream.
            break

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if is_video:
        # Release handle to the webcam
        video_capture.release()

    cv2.destroyAllWindows()
Example #16
    def addrec():
        msg=''
        if request.method == 'POST':
            mydb = mysql.connector.connect(
                host="localhost",
                user="******",
                password="",
                database="attendance_db"
            )
            mycursor = mydb.cursor()
            subject_name = request.form['subject_name']
            date = request.form['date']
            time = request.form['time']
            batch = request.form['batch']
            mycursor2 = mydb.cursor()
            status_a="A"
            mycursor2.execute("SELECT roll_no FROM student_subject where subject_name=%s",(subject_name,))
            rollno = mycursor2.fetchall()
            for row in rollno:
                mycursor.execute('INSERT INTO attendance(subject_name,date,time_slot,batch,roll_no,status) VALUES (%s,%s,%s,%s,%s,%s)',(subject_name,date,time,batch,row[0],status_a))
            
            mydb.commit()

            def RetrieveImages():
                SQLStatement2 = "SELECT rollno,image from student"
                mycursor.execute(SQLStatement2)
                myresult = mycursor.fetchall()
                for row in myresult:
                    name = row[0]
                    photo = row[1]
                    storefilepath = "C:/Users/Oggy/.spyder-py3/face_recog/images/{0}.jpg".format(str(name))
                    write_file(photo, storefilepath)

            def write_file(data, filepath):
                with open(filepath, "wb") as File:
                    File.write(data)

            RetrieveImages()

            path = 'C:/Users/Oggy/.spyder-py3/face_recog/images'
            imgList = []
            personNames = []
            myList = os.listdir(path)
            print(myList)
            for cls in myList:
                curImg = cv2.imread(f'{path}/{cls}')
                imgList.append(curImg)
                personNames.append(os.path.splitext(cls)[0])

            def findEncodings(imgList):
                encodeList = []
                for img in imgList:
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    encode = face_recognition.face_encodings(img)[0]
                    encodeList.append(encode)
                return encodeList

            encodeListStoredImages = findEncodings(imgList)
            cap = cv2.VideoCapture(0)
            studentsPresent = []

            while True:
                success, img = cap.read()
                imgwebcam = cv2.resize(img,(0,0),None,0.25,0.25)
                imgwebcam = cv2.cvtColor(imgwebcam, cv2.COLOR_BGR2RGB)
                facesInCurrFrame = face_recognition.face_locations(imgwebcam)
                encodeCurrFrame = face_recognition.face_encodings(imgwebcam,facesInCurrFrame)
    
                for encodeFace,faceLoc in zip(encodeCurrFrame,facesInCurrFrame):
                    matches = face_recognition.compare_faces(encodeListStoredImages, encodeFace)
                    faceDis = face_recognition.face_distance(encodeListStoredImages, encodeFace)
                    matchIndex = np.argmin(faceDis)
        
                    if matches[matchIndex]:
                        name = personNames[matchIndex].upper()
                        if name not in studentsPresent:
                            studentsPresent.append(name)
                            SQLStatement2 = "UPDATE attendance SET status=%s WHERE roll_no=%s"
                            val = ("P",name)
                            mycursor.execute(SQLStatement2,val)
                            mydb.commit()
                        y1,x2,y2,x1 = faceLoc
                        y1,x2,y2,x1 = y1*4,x2*4,y2*4,x1*4
                        cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
                        cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
                        cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,255),2)
            
                cv2.imshow('Webcam',img)
                #cv2.waitKey(1)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
            cap.release()
            cv2.destroyAllWindows()
            return render_template('list_of_student.html', studentsPresent=studentsPresent)
Example #17
knownFaces = findEncodings(images)
print('Encoding complete')

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

    faceLoc = face_recognition.face_locations(imgS)
    faceEncode = face_recognition.face_encodings(imgS, faceLoc)

    for encoder, location in zip(faceEncode, faceLoc):
        matches = face_recognition.compare_faces(knownFaces, encoder)
        faceDis = face_recognition.face_distance(knownFaces, encoder)
        print(faceDis)

        matchIndex = np.argmin(faceDis)

        if matches[matchIndex]:
            name = students[matchIndex].upper()
            print(name)
            y1, x2, y2, x1 = location
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(img, name, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1,
                        (0, 255, 0), 2)
            markAttendance(name)

    cv2.imshow('Webcam', img)
Example #18
cap = cv2.VideoCapture(0)

while cap.isOpened():
    success, frame = cap.read()
    frame_sm = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
    frame_sm = cv2.cvtColor(frame_sm, cv2.COLOR_BGR2RGB)

    faces_locs = face_recognition.face_locations(frame_sm)
    encodings = face_recognition.face_encodings(frame_sm, faces_locs)

    for encoding, face in zip(encodings, faces_locs):
        matches = face_recognition.compare_faces(encode_list,
                                                 encoding,
                                                 tolerance=0.5)
        distance = face_recognition.face_distance(encode_list, encoding)
        match_index = np.argmin(distance)

        if matches[match_index]:
            name = class_names[match_index].upper()
            y1, x2, y2, x1 = face
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(frame, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(frame, name, (x1 + 6, y2 - 6),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

    cv2.imshow('Webcam', frame)
    if cv2.waitKey(1) == ord('q'):
        break
Example #19
    def post(self, event_id):

        event = Event.query.get(event_id)
        attendees = event.attendee
        encodeList = []
        nameList = []
        ids = []
        # print(attendees)
        for attendee in attendees:
            url = attendee.image
            url_response = urllib.request.urlopen(url)
            img_array = np.array(bytearray(url_response.read()),
                                 dtype=np.uint8)
            img = cv2.imdecode(img_array, -1)
            encode = face_recognition.face_encodings(img)[0]
            encodeList.append(encode)
            nameList.append(attendee.name)
            ids.append(attendee.id)
        # print(encodeList)
        data = request.get_json()
        url = data["url"]
        url_response = urllib.request.urlopen(url)
        img_array = np.array(bytearray(url_response.read()), dtype=np.uint8)
        img = cv2.imdecode(img_array, -1)
        facesCurFrame = face_recognition.face_locations(img)
        encodingsCurFrame = face_recognition.face_encodings(img, facesCurFrame)
        name = ""
        for encodeFace, faceLoc in zip(encodingsCurFrame, facesCurFrame):
            matches = face_recognition.compare_faces(encodeList, encodeFace)
            dist = face_recognition.face_distance(encodeList, encodeFace)
            print(matches)
            print(dist)

            matchIndex = np.argmin(dist)

            if matches[matchIndex]:
                name = nameList[matchIndex].upper()
                attendee_status = Status.query.filter_by(
                    event_id=event_id, attendee_id=ids[matchIndex]).first()
                attendee_status.status = True
                db.session.commit()
            else:
                name = "Unknown"

        result = dict()

        if name == "Unknown":
            result["message"] = "Attendee not found in registered list"
            result["name"] = "Unknown"

        else:
            result["message"] = "Attendee present in registered list"
            result["name"] = name

        del facesCurFrame
        del encodingsCurFrame
        del img
        del img_array
        del encodeList
        del nameList

        return result
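Example #20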
imgs = cv2.resize(frame, (0, 0), None, 1.5,
                  1.5)  #The image is enlarged 1.5 times

facesCurrFrame = face_recognition.face_locations(
    imgs
)  #the faces in the image are located and their (top, right, bottom, left) coordinates are stored.
encCurrFrame = face_recognition.face_encodings(
    imgs, facesCurrFrame)  #their face encodings are obtained.

for encFace, faceLoc in zip(encCurrFrame, facesCurrFrame):
    matches = face_recognition.compare_faces(
        enclistKnown, encFace
    )  #gives a boolean array with True or False depending on whether matching faces are found or not
    facedist = face_recognition.face_distance(
        enclistKnown, encFace
    )  #this command finds the Euclidean distance for each comparison face.
    matchindex = np.argmin(
        facedist)  #the index of the minimum Euclidean distance is stored
    if matches[matchindex]:
        name = classnames[matchindex].upper()  #capitalizing the names
        print(name)
        y1, x2, y2, x1 = faceLoc  #gives location of the face in a rectangular area
        imgs = cv2.rectangle(
            imgs, (x1 - 5, y1 - 5), (x2 + 5, y2 + 5), (255, 0, 0),
            2)  #rectangle of required size is drawn around face
        imgs = cv2.rectangle(
            imgs, (x1 - 5, y2 - 10), (x2 + 5, y2 + 5), (255, 0, 0),
            cv2.FILLED)  #thick border under the rectangle to write the name
        imgs = cv2.putText(
            imgs, name, (x1 - 2, y2), cv2.FONT_HERSHEY_COMPLEX, 0.4,
            (255, 255, 255), 1)  #the name is written on the border
Example #21
 def distance_statistics(self):
     encodings = [face.encoding for face in self.faces]
     distances = face_recognition.face_distance(encodings, self.encoding)
     return min(distances), np.mean(distances), max(distances)
Example #22
def get_face_distances_with_encoding(face_encoding, known_faces):
    #matches = face_recognition.compare_faces(known_faces, face_encoding)
    distances = face_recognition.face_distance(known_faces, face_encoding)
    return distances
Example #23
def analisar():
    video_capture = cv2.VideoCapture(0)
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    encodings, face_encoding)
                name = "Desconhecido"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = names[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

            # return the first recognized name (this exits the function
            # before video_capture.release() below is reached)
            for x in face_names:
                return x
            break

    # Release handle to the webcam
    video_capture.release()
Example #24
def face(img, pathToFile, file_hash, current_time):
	# connection to database
	conn = psycopg2.connect(database='s14g09_IMDB_ColorPrediction', user='******', password="******", host='movie.cdnh3cwt5np2.us-east-1.rds.amazonaws.com', port='5432')

	# connet to DB
	cur = conn.cursor()

	# get list of nconsts from DB
	cur.execute('SELECT nconst from actors')
	known_face_nconsts = cur.fetchall()

	# get list of face encodings from DB, convert from json to list
	cur.execute('SELECT face_encoding from actors')
	db_face_encodings = cur.fetchall()
	db_face_encodings = list(db_face_encodings)

	# set up know face encodings array
	known_face_encodings = []

	# process all tuple face encodings from db
	for i, face_encoding in enumerate(db_face_encodings):
		# turn into properly formatted numpy array
		face_encoding = list(face_encoding[0])
		face_encoding = np.array(face_encoding)

		# convert face_encoding to numpy array
		face_encoding = np.array(face_encoding)

		# add to known face encodings array
		known_face_encodings.append(face_encoding)

	# set image paths
	origPath = os.path.join(path, f'og-fc-{file_hash}-{current_time}.jpg')
	newPath = os.path.join(path, f'pr-fc-{file_hash}-{current_time}.jpg')

	#scale down image for processing
	image = cv2.imread(pathToFile, 0)
	image = imutils.resize(image, height=1000)

	# temporarily write image to disk
	cv2.imwrite(origPath, image)

	# load image
	image = face_recognition.load_image_file(origPath)

	# find faces, store number of faces
	face_locations = face_recognition.face_locations(image)
	num_faces = len(face_locations)

	# create face encodings of each found faces
	face_encodings = face_recognition.face_encodings(image, face_locations, num_jitters=2)

	# set up identified actors array
	identified_actors = []

	# convert to PIL format image and init draw instance
	pil_image = Image.fromarray(image)
	draw = ImageDraw.Draw(pil_image)

	# process each face found
	for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
		# dimensions
		left -= 20
		top -= 20
		right += 20
		bottom += 20

		# find possible matches
		matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)

		name = "Unknown"

		# get euclidean distance for the face
		face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)

		# find known face with smallest distance
		best_match_index = np.argmin(face_distances)
		if matches[best_match_index]:
			# associate known face with nconst
			nconst = known_face_nconsts[best_match_index][0]

			# find actor ID via nconst and extract value
			cur.execute("""SELECT name from actors WHERE nconst = '{}'""".format(nconst))
			name = cur.fetchone()
			name = name[0]

			# add name to identified actors array
			identified_actors.append(name)

		# draw rectangle over face
		draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255))

		# font size configuration
		fontsize = 1
		img_fraction = 0.10

		# calculate font size
		font = ImageFont.truetype("BarlowSemiCondensed-Medium.ttf", fontsize)
		while font.getsize(name)[0] < img_fraction*pil_image.size[0]:
			fontsize += 1
			font = ImageFont.truetype("BarlowSemiCondensed-Medium.ttf", fontsize)

		# Draw a label with a name below the face
		text_width, text_height = draw.textsize(name, font=font)
		draw.rectangle(((left, bottom - text_height - 10), (right, bottom)), fill=(0, 0, 255), outline=(0, 0, 255))
		draw.text((left + 6, bottom - text_height - 10), name, fill=(255, 255, 255, 255), font=font)

	# delete draw instance
	del draw

	# save image to disk
	pil_image.save(newPath)

	# return results in a dict
	results = {'num_faces': num_faces, 'identified_actors': identified_actors,
            'face_file': newPath.replace('static/', '')}
	return results
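Example #25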
def FaceRec():

    # ImageFirebase.BringFromFirebase()

    path = r'/Users/admin/VScode/Mini-Project-III-Python/Final-Code/ResFaceRecog/'
    images = []
    classNames = []

    myList = os.listdir(path)
    print(myList)

    for cls in myList:

        curImg = cv2.imread(f'{path}/{cls}')

        images.append(curImg)

        classNames.append(os.path.splitext(cls)[0])

    print(classNames)

    def compareFirebase():
        print("Hello")

    def findEncodings(images):

        encodeList = []

        for img in images:

            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            encode = face_recognition.face_encodings(img)[0]

            encodeList.append(encode)

        return encodeList

    # def MarkAttendance(name):

    #     with open(r'/Users/admin/VScode/Mini-Project-III-Python/FaceRecognition/Final/Previous/Attendance.csv', 'r+') as f:

    #         myDataList = f.readlines()
    #         nameList = []

    #         # print(myDataList)

    #         for line in myDataList:

    #             entry = line.split(',')
    #             nameList.append(entry[0])

    #         if name not in nameList:

    #             now = datetime.now()

    #             dateString = now.strftime('%H:%M:%S')

    #             f.writelines(f'\n{name},{dateString}')

    encodeListKnown = findEncodings(images)
    print('Encoding Complete')

    cap = cv2.VideoCapture(0)

    flag = True

    while True:

        while flag:

            success, img = cap.read()

            imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)

            imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

            facesCurFrame = face_recognition.face_locations(imgS)
            encodingsCurFrame = face_recognition.face_encodings(
                imgS, facesCurFrame)

            for encodeFace, faceLoc in zip(encodingsCurFrame, facesCurFrame):

                matches = face_recognition.compare_faces(
                    encodeListKnown, encodeFace)

                FaceDist = face_recognition.face_distance(
                    encodeListKnown, encodeFace)

                # print(FaceDist)

                matchIndex = np.argmin(FaceDist)

                if matches[matchIndex]:
                    name = classNames[matchIndex].upper()
                    # print(name)
                    flag = False

                    y1, x2, y2, x1 = faceLoc
                    y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4

                    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

                    cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                                  cv2.FILLED)

                    cv2.putText(img, name, (x1 + 6, y2 - 6),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255),
                                2)

                    print("Welcome to MITSOE " + name)
                    # MarkAttendance(name)

            cv2.imshow('Webcam', img)
            cv2.waitKey(1)
            # flag = 1

        if not flag:
            break

    # testguirandom.finalScreen()
    return name
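Example #26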
                # Here we will have the input face features
                with open("featuresOfTrainingImages.txt",
                          "rb") as fp:  # Unpickling
                    featuresOfTrainingImages = pickle.load(fp)
                with open("imgNames.txt", "rb") as fp:
                    imgNames = pickle.load(fp)
                with open("images.txt", "rb") as fp:
                    images = pickle.load(fp)
                # matching the input feature with the loaded images features.
                attendance = []
                for encodeInput, facesOfInput in zip(inputFeatures,
                                                     faceLocation):
                    # we don't use compare_faces here; we take the distance directly as the first preference for accuracy.
                    # matchs = face_recognition.compare_faces(featuresOfTrainingImages, encodeInput, tolerance=0.3)
                    # print(encodeInput)
                    faceDistance = face_recognition.face_distance(
                        featuresOfTrainingImages, encodeInput)

                    print(imgNames, "names")
                    print(faceDistance, "facedistance")
                    index = np.argmin(faceDistance)
                    print(imgNames[index])
                    if imgNames[index] not in attendance:
                        attendance.append(imgNames[index])
            # cv2.imwrite(os.path.join(finalPath, 'InputImage.jpg'), imgInput)
            #
            # for i in range(len(faceDistance)):
            #     if faceDistance[i] == faceDistance[index]:
            #         print(imgNames[i])
            # for i in range(len(matchs)):
            #     if matchs[i]:
            #         print(i, matchs[i])
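Example #27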
print('Encoding Complete')

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    # img = captureScreen()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)

        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            # print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX,
                        1, (255, 255, 255), 2)
            markAttendance(name)
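markAttendance is called here but not defined in the snippet; a minimal sketch matching the CSV-append pattern used elsewhere in these examples (the file name is hypothetical):

from datetime import datetime

def markAttendance(name):
    # append "name,HH:MM:SS" once per name to the attendance CSV
    with open('Attendance.csv', 'r+') as f:
        names = [line.split(',')[0] for line in f.readlines()]
        if name not in names:
            now = datetime.now()
            f.writelines(f'\n{name},{now.strftime("%H:%M:%S")}')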
Example #28
def film():
    # Get a reference to webcam #0 (the default one)
    video_capture = cv2.VideoCapture(0)

    # Load a sample picture and learn how to recognize it.
    known_face_encodings = []
    known_face_names = []
    for person in query_all():
        path = "static/known_people/" + person.picture
        known_face_encodings.append(
            face_recognition.face_encodings(
                face_recognition.load_image_file(path))[0])
        known_face_names.append(person.name)
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
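Example #29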
            dtstring = now.strftime('%H:%M:%S')
            f.writelines(f'\n{name},{dtstring}')


cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = fr.face_locations(imgS)
    encodeCurFrame = fr.face_encodings(imgS, facesCurFrame)

    for encodeface, faceLoc in zip(encodeCurFrame, facesCurFrame):
        matches = fr.compare_faces(encodelistknown, encodeface)
        faceDis = fr.face_distance(encodelistknown, encodeface)
        #print(faceDis)
        matchIndex = np.argmin(faceDis)

        if matches[matchIndex]:
            name = classnames[matchIndex].upper()
            #print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX,
                        1, (255, 255, 255), 2)
            markAttendance(name)
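Example #30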
import cv2
import numpy as np
import face_recognition

img_abu = face_recognition.load_image_file('images/abubakar.jpg')
img_abu = cv2.cvtColor(img_abu, cv2.COLOR_BGR2RGB)

img_test = face_recognition.load_image_file('images/abubakar_test.jpg')
img_test = cv2.cvtColor(img_test, cv2.COLOR_BGR2RGB)


face_location = face_recognition.face_locations(img_abu)[0]
encode_abu = face_recognition.face_encodings(img_abu)[0]
cv2.rectangle(img_abu, (face_location[3], face_location[0]), (face_location[1], face_location[2]), (255, 0, 255), 2)

face_location_test = face_recognition.face_locations(img_test)[0]
encode_test = face_recognition.face_encodings(img_test)[0]
cv2.rectangle(img_test, (face_location_test[3], face_location_test[0]), (face_location_test[1], face_location_test[2]), (255, 0, 255), 2)

results = face_recognition.compare_faces([encode_abu], encode_test)
face_dis = face_recognition.face_distance([encode_abu], encode_test)
print(results, face_dis)

cv2.putText(img_test, f'{results} {round(face_dis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)

cv2.imshow('Abubakar', img_abu)
cv2.imshow('Abubakar Test', img_test)
cv2.waitKey(0)
Example #31
def gen_frames():
    process_this_frame = True
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize the frame to 1/4 size for faster processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR (used by OpenCV) to RGB (used by face_recognition)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process alternate frames to save time
        if process_this_frame:
            # Find all the faces in the frame
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # Compare the face against the known faces
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Desconhecido"

                # Use the known face with the smallest distance to the new one
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)

                if matches[best_match_index]:
                    name = users_analyse[best_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale the face locations back up
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Write the recognized client's name
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
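Example #32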
# positive matches at the risk of more false negatives.

# Note: This isn't exactly the same as a "percent match". The scale isn't linear. But you can assume that images with a
# smaller distance are more similar to each other than ones with a larger distance.

# Load some images to compare against
known_obama_image = face_recognition.load_image_file("obama.jpg")
known_biden_image = face_recognition.load_image_file("biden.jpg")

# Get the face encodings for the known images
obama_face_encoding = face_recognition.face_encodings(known_obama_image)[0]
biden_face_encoding = face_recognition.face_encodings(known_biden_image)[0]

known_encodings = [
    obama_face_encoding,
    biden_face_encoding
]

# Load a test image and get encodings for it
image_to_test = face_recognition.load_image_file("obama2.jpg")
image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0]

# See how far apart the test image is from the known faces
face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding)

for i, face_distance in enumerate(face_distances):
    print("The test image has a distance of {:.2} from known image #{}".format(face_distance, i))
    print("- With a normal cutoff of 0.6, would the test image match the known image? {}".format(face_distance < 0.6))
    print("- With a very strict cutoff of 0.5, would the test image match the known image? {}".format(face_distance < 0.5))
    print()
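Example #33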
def verify(request):
    
    path = 'media/profile_images'

    images = []

    class_names = []
    
    name = ''

    my_list = os.listdir(path)

    print(my_list)

    for cls in my_list:
        current_image = cv2.imread(f'{path}/{cls}')
        images.append(current_image)
        class_names.append(cls.rsplit('.', 4)[0])
    # print(images)
    print(class_names)

    def find_encodings(images):
        encode_list = []
        for img in images:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            encode = face_recognition.face_encodings(img)[0]
            encode_list.append(encode)
        return encode_list

    def get_current_user(name=None):
        if name is None:
            attend_status_fail = True
            sys_status = 'Attendance not Captured. Try Again!!'
            context = {'attend_status_fail':attend_status_fail,'sys_status':sys_status,}
            return context
           
        else:
            attend_status_success = True
            sys_status = 'Attendance Captured.'
            user_profile = User.objects.filter(username=f'{name.lower()}')
            for user in user_profile:
                first_name = user.first_name
                last_name = user.last_name
                status = user.profile.status
                attendance_time = user.profile.attendance_time
                lateness = user.profile.lateness_ago
            
            context = {
                    'attend_status_success':attend_status_success,
                    'sys_status':sys_status,
                    'first_name':first_name,
                    'last_name':last_name,
                    'status':status,
                    'attendance_time':attendance_time,
                    'lateness':lateness,
                    }
        return context   
     
    def add_attendance(user_id, fullname, email, designation, status, late=False, lateness=None):
        attendance = Attendance.objects.create(
                        user_id = user_id,
                        fullname = fullname,
                        email = email,
                        designation = designation,
                        status = status,
                        late = late,
                        lateness = lateness,
                       
                    )
        attendance.save()
        print("Attendance recorded!!!")
        
    def mark_attendance(name):
         
        print(f'\n Taking Attendance for {name} !!')   
       
        current_date_and_time = datetime.datetime.now()
        print('Now          :', current_date_and_time)
        added_time = datetime.timedelta(minutes=1)
        new_time_line = current_date_and_time + added_time
        print ('New Time     :', new_time_line)
        
         # get current user details before marking attendance
        user_profile = User.objects.filter(username=f'{name.lower()}')
        for user in user_profile:
            user_id = user.id
            fullname = user.last_name + " " + user.first_name
            email = user.email
            designation = user.designation.title
            lateness_benchmark_time = datetime.datetime.strptime(user.designation.lateness_benchmark, '%H:%M:%S').time()
            lateness_benchmark =  datetime.datetime.combine(datetime.datetime.now(), lateness_benchmark_time)
            # status = user.profile.status
            # attendance_time = user.profile.attendance_time
            ban_time = user.profile.ban_time
            
            
        my_profile = Profile.objects.filter(username=f'{name.lower()}')
        for p in my_profile:
            status = p.status
            ban_time = p.ban_time
        
        if status == 'Signed Out': 
            if ban_time is None or current_date_and_time > ban_time:
              
                print()
               
                if current_date_and_time > lateness_benchmark:
                    
                    late = True
                    lateness_ago = timeago.format(lateness_benchmark, current_date_and_time)
                    late_duration = lateness_ago.rsplit(' ', 3)[0]
                    late_duration_2 = lateness_ago.rsplit(' ', 3)[1]
                    lateness = late_duration + ' ' + late_duration_2
                    print(f"You are late ooo..You passed you lateness benchmark  {lateness_ago}.")
                    
                    my_profile.update(status='Signed In', ban_time=new_time_line, attendance_time=current_date_and_time, lateness_ago=lateness_ago)
                    add_attendance(user_id, fullname, email, designation, "Signed In", late, lateness)
                
                if current_date_and_time < lateness_benchmark:
                    
                    late = False                    
                    my_profile.update(status='Signed In', ban_time=new_time_line, attendance_time=current_date_and_time, lateness_ago=None)
                    add_attendance(user_id, fullname, email, designation, "Signed In")
                    
        elif status == 'Signed In':
            if ban_time is None or current_date_and_time > ban_time:
                my_profile.update(status='Signed Out', ban_time=new_time_line, attendance_time=current_date_and_time, lateness_ago=None)
                add_attendance(user_id, fullname, email, designation, "Signed Out")
         
        print(f'Currently {status} with ban time at {ban_time}')
                        


    encode_list_known = find_encodings(images)
    print('Encoding Complete')

    cap = cv2.VideoCapture(0)

    while True:
        success, image = cap.read()
        image_small = cv2.resize(image, (0, 0), None, 0.25, 0.25)  # quarter size; coordinates are scaled back by 4 below
        image_small = cv2.cvtColor(image_small, cv2.COLOR_BGR2RGB)
        
        faces_in_current_frame = face_recognition.face_locations(image_small)
        encodings_of_current_frame = face_recognition.face_encodings(image_small, faces_in_current_frame)
        
        for encode_face, face_loc in zip(encodings_of_current_frame, faces_in_current_frame):
            matches = face_recognition.compare_faces(encode_list_known, encode_face, tolerance=0.49)
            face_dist = face_recognition.face_distance(encode_list_known, encode_face)
            print(face_dist)
            match_index = np.argmin(face_dist)
            
            if matches[match_index]:
                name = class_names[match_index].upper()
                time = datetime.datetime.now()
                print(name)
                details = name + " - " + str(time)
                y1, x2, y2, x1 = face_loc
                y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
                cv2.rectangle(image, (x1,y1), (x2,y2), (0,255,0),2)
                cv2.rectangle(image, (x1,y2-35), (x2,y2), (0,255,0), cv2.FILLED)
                cv2.putText(image, details, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255,255,255), 2)
                
                mark_attendance(name)
                
            else:
                name = "Unknown"
                # print(name)    
                y1, x2, y2, x1 = face_loc
                y1, x2, y2, x1 = y1*4, x2*4, y2*4, x1*4
                cv2.rectangle(image, (x1,y1), (x2,y2), (0,0,255),2)
                cv2.rectangle(image, (x1,y2-35), (x2,y2), (0,0,255), cv2.FILLED)
                cv2.putText(image, name, (x1+6, y2-6), cv2.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 2)
                
        cv2.imshow('Attendance Cam', image)
        key = cv2.waitKey(1)
        if key == 13:
            break
        
        
    # Release handle to the webcam
    cap.release()
    cv2.destroyAllWindows()
    if name == 'Unknown' or name =='':
        context = get_current_user()
    else:
        context = get_current_user(name.lower())
    return render(request, 'index.html', context)
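The view above downscales frames for detection and multiplies the face coordinates back up by 4; tying both factors to a single constant makes the pairing hard to get wrong. A minimal sketch (function name hypothetical):

import cv2
import face_recognition

SCALE = 0.25  # detection runs on a quarter-size frame

def detect_full_res(frame):
    small = cv2.resize(frame, (0, 0), None, SCALE, SCALE)
    rgb = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)
    factor = round(1 / SCALE)
    # map each (top, right, bottom, left) box back to full-resolution coordinates
    return [tuple(v * factor for v in loc)
            for loc in face_recognition.face_locations(rgb)]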
Exemple #34
0
def attendance(semester, branch):

    try:
        loc = os.path.join("./students", branch + "_" + semester + ".pkl")
        with open(loc, "rb") as f:
            students = np.array(pickle.load(f))
    except Exception:
        print("Unable to load student image details.")
        print("Please make sure that it exists in students folder.")
        return set()

    student_encodings = []
    for student in students:
        student_encodings.append(student[0])
    face_locations = []
    cap = cv2.VideoCapture(0)
    present = []
    while True:
        ret, frame = cap.read()

        frame = cv2.flip(frame, 1)
        frame = cv2.copyMakeBorder(frame, 0, 0, 150, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        # fx=fy=1 keeps the full resolution, so no coordinate scaling is needed below
        small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1)

        faces = []

        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)

        for face_en in face_encodings:
            distances = face_recognition.face_distance(student_encodings, face_en)
            index, difference = min(enumerate(distances), key=operator.itemgetter(1))
            if difference <= 0.5:
                faces.append(students[index][1])
                present.append(students[index][1])
            else:
                faces.append("Unknown")

        if len(faces) > 0:
            for (top, right, bottom, left), regno in zip(face_locations, faces):
                color = (0, 0, 255)
                if regno != "Unknown":
                    color = (0, 255, 0)
                cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

            y = 20
            font = cv2.FONT_HERSHEY_COMPLEX
            for regno in faces:
                if regno != "Unknown":
                    cv2.putText(frame, regno, (20, y), font, 0.5, (255, 255, 255), 1)
                    y = y + 20

        cv2.imshow('FRAS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    return set(present)
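The pickle loaded above appears to hold (encoding, regno) pairs. A hedged sketch of producing such a file, assuming one labelled photo per student (directory layout and names are assumptions):

import os
import pickle
import face_recognition

def build_student_db(image_dir, out_path):
    records = []
    for fname in os.listdir(image_dir):
        regno = os.path.splitext(fname)[0]
        image = face_recognition.load_image_file(os.path.join(image_dir, fname))
        encodings = face_recognition.face_encodings(image)
        if encodings:  # skip photos where no face was found
            records.append((encodings[0], regno))
    with open(out_path, "wb") as f:
        pickle.dump(records, f)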
Exemple #35
0
 def check(self, detected_face):
     # we could use detected landmarks, but I did not manage to do so
     encoding = face_recognition.face_encodings(detected_face.image)[0]
     score = face_recognition.face_distance([self.encoding], encoding)[0]
     print(score)
     return score <= self.threshold
Exemple #36
0
def face_detect():
    myList = os.listdir(path)
    print(myList)

    for cls in myList:
        curImg = cv2.imread(f'{path}/{cls}')
        images.append(curImg)
        classNames.append(os.path.splitext(cls)[0])

    print(classNames)

    def findEncodings(images):
        encodeList = []
        for img in images:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            encode = face_recognition.face_encodings(img)[0]
            encodeList.append(encode)
        return encodeList

    def markAttendance(name):
        with open("Attendance.csv", "r+") as f:
            myDataList = f.readlines()
            nameList = []
            for line in myDataList:
                entry = line.split(',')
                nameList.append(entry[0])
            if name not in nameList:
                now = datetime.now()
                dtString = now.strftime("%H:%M:%S")
                f.writelines(f'\n{name}, {dtString}')
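        # Note: mode "r+" requires Attendance.csv to exist beforehand;
        # "a+" plus f.seek(0) before reading would also create it on first run.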

    encodeListKnown = findEncodings(images)
    print("Декодирование закончено")

    while True:
        success, img = cap.read()
        frame = img  # fall back to the raw frame so imshow below always has one
        imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)

        facesCurFrame = face_recognition.face_locations(imgS)
        encodeCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)

        for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
            matches = face_recognition.compare_faces(encodeListKnown,
                                                     encodeFace)
            faceDis = face_recognition.face_distance(encodeListKnown,
                                                     encodeFace)
            #print(faceDis)
            matchIndex = np.argmin(faceDis)

            name = 'Unknown'

            if matches[matchIndex]:
                name = classNames[matchIndex]
                y1, x2, y2, x1 = faceLoc
                y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4

                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 0), 3)
                cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 0, 0),
                              cv2.FILLED)

                font_path = 'Fonts/Roboto-Regular.ttf'
                font = ImageFont.truetype(font_path, 32)
                img_pil = Image.fromarray(img)
                b, g, r, a = 255, 255, 255, 0
                draw = ImageDraw.Draw(img_pil)
                draw.text((x1 + 6, y2 - 35),
                          str(name),
                          font=font,
                          fill=(b, g, r, a))
                frame = np.array(img_pil)
                markAttendance(name)

            else:
                filename = 'KnownFaces/face.jpg'
                cv2.imwrite(filename, img)
                print("Лицо сохранено")
                find_clone(filename)

        cv2.imshow("WebCam", frame)
        cv2.waitKey(1)
Exemple #37
0
    face_names = []
    current_face_genders = []
    screen_face_locations = []

    for i in range(len(face_locations)):

        face_encoding = face_encodings[i]
        face_location = face_locations[i]

        # See if the face is a match for the known face(s)
        match = face_recognition.compare_faces(known_faces, face_encoding,tolerance = 0.6)

        indices_match = np.where(match)[0]

        if len([x for x in indices_match if x <= 3]) > 1:
            index_match = [np.argmin(face_recognition.face_distance(known_faces, face_encoding)[:3])]
        else:
            index_match = indices_match

        name,unknown_face_num,known_faces,know_faces_names,get_moreface = noFaceMatched(text,unknown_face_num,known_faces,know_faces_names,face_encoding,get_moreface)

        cropFace,saveFName = saveFaceImg(face_location,name,frame,newpath)

        current_face_genders,known_face_genders,unknown_face_num,know_faces_names,known_faces = estReadNewImg(saveFName,model,known_face_genders,current_face_genders,unknown_face_num,known_faces,know_faces_names)

        face_names.append(name)
        screen_face_locations.append(face_location)
        #print(face_location,name)
        #print (time.time() - start)

    last_face_num = len(face_locations)
Exemple #38
0
def get_result(face_encodings_known_ndarray, face_names_known_ndarray, picture=None):
    """
    Run the face recognition pipeline and return the recognised names.
    @param picture: path of the picture to recognise (unused when reading from the webcam)
    @param face_encodings_known_ndarray: array of known face encodings
    @param face_names_known_ndarray: array of known face names
    @return: name_list
    """
    # Names recognised so far, without duplicates
    name_list = []

    # Names recognised in the last processed frame
    face_names = []

    # Face locations
    face_locations = []

    # Whether to process the current video frame
    process_this_frame = True

    # Use the webcam to recognise faces
    video_capture = cv2.VideoCapture(0)

    # Alternatively, load the target picture
    # picture = cv2.imread(picture)

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Alternatively, use the picture as the frame
        # frame = picture

        # Resize the frame to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the face locations and face encodings in the current frame
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for k in face_encodings:
                # Default name
                name = "Unknown"
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(face_encodings_known_ndarray, k)

                # If a match was found in the known face encodings, just use the first one
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(face_encodings_known_ndarray, k)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = face_names_known_ndarray[best_match_index]

                face_names.append(name)
                if name != "Unknown" and name not in name_list:
                    name_list.append(name)
        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale face locations back up, since the detection frame was 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with the name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Show the resulting image
        cv2.namedWindow('Picture', 0)
        cv2.imshow('Picture', frame)

        # Press 'q' on the keyboard to quit
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the webcam
    video_capture.release()
    cv2.destroyAllWindows()

    return name_list
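A minimal usage sketch, assuming the encoding and name arrays were built and saved elsewhere (file names are hypothetical):

import numpy as np

known_encodings = np.load("encodings.npy")  # assumed shape (N, 128)
known_names = np.load("names.npy")          # assumed shape (N,)
print(get_result(known_encodings, known_names))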
Exemple #39
0
def recognize():
    video_capture = cv2.VideoCapture(0)

    df = pd.read_csv("StudentDetails.csv")
    # Load a sample picture and learn how to recognize it.
    for student in df.index:
        sname = str(df['Name'][student])
        imagerollNo = str(df['rollNo'][student])
        imageName = sname + "_" + imagerollNo
        known_face_names.append(sname)
        image = face_recognition.load_image_file("Students\\" + imageName +
                                                 ".jpg")
        encoding = face_recognition.face_encodings(image)[0]
        known_face_encodings.append(encoding)

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    df = pd.read_csv("StudentDetails.csv")
    df.set_index('Name', inplace=True)
    col_names = ['rollNo', 'Name', 'Date', 'Time']
    attendance = pd.DataFrame(columns=col_names)
    process_this_frame = True

    while True:

        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(
                rgb_small_frame, number_of_times_to_upsample=1, model='hog')
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)

                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding, 0.7)
                name = "Unknown"
                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)

                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                    print(name)
                    # if int(name) in range(1,61):
                    #     sheet.cell(row=int(name), column=int(today)).value = "Present"
                    # else:
                    #     pass
                    ts = time.time()
                    date = datetime.datetime.fromtimestamp(ts).strftime(
                        '%Y-%m-%d')
                    timeStamp = datetime.datetime.fromtimestamp(ts).strftime(
                        '%H:%M:%S')
                    print(df.loc[name])
                    # aa=df.loc[df['rollNo'] == rollNo]['Name'].values
                    # print(aa)
                    # tt=str(rollNo)+"-"+aa
                    # attendance.loc[len(attendance)] = [rollNo,aa,date,timeStamp]

                face_names.append(name)
            attendance = attendance.drop_duplicates(subset=['rollNo'],
                                                    keep='first')

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Save worksheet as the present month
        # book.save(str(month)+'.xlsx')

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # else:
        # ts = time.time()
        # date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
        # timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
        # Hour,Minute,Second=timeStamp.split(":")
        # fileName="Attendance\Attendance_"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv"
        # attendance.to_csv(fileName,index=False)
        # res=attendance

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
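Note that the bare third positional argument to compare_faces above (0.7) is the tolerance; passing it by keyword makes the looser-than-default cutoff explicit:

matches = face_recognition.compare_faces(
    known_face_encodings, face_encoding, tolerance=0.7)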
Exemple #40
0
def face_rec():
    global name
    global condition

    camSet = 'nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), width=3264, height=2464, framerate=21/1,format=NV12 ! nvvidconv flip-method=2 ! video/x-raw, width=800, height=600, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink'
    video_capture = cv2.VideoCapture(camSet)

    # Load a sample picture and learn how to recognize it.
    ado_image = face_recognition.load_image_file("ado.jpg")
    ado_face_encoding = face_recognition.face_encodings(ado_image)[0]

    # Load a second sample picture and learn how to recognize it.
    plo_image = face_recognition.load_image_file("plo.jpg")
    plo_face_encoding = face_recognition.face_encodings(plo_image)[0]

    # Load a third sample picture and learn how to recognize it.
    md_image = face_recognition.load_image_file("md.jpg")
    md_face_encoding = face_recognition.face_encodings(md_image)[0]

    # Create arrays of known face encodings and their names
    known_face_encodings = [
        ado_face_encoding, plo_face_encoding, md_face_encoding
    ]
    known_face_names = ["Alvin", "Paulos", "Watin"]

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    condition = 0

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"
                condition = 0

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    condition = 1234

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
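The nvarguscamerasrc pipeline above targets a Jetson CSI camera. On a machine with an ordinary USB webcam, the capture line reduces to the usual (sketch):

video_capture = cv2.VideoCapture(0)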
Exemple #41
0
    # * Encoding the Faces
    currentEncodeFaces = face_recognition.face_encodings(
        frameRGB, currentFaceLocs)

    # * Loop through all the faces in the frame to find matches
    for currentFaceLoc, currentEncodeFace in zip(currentFaceLocs,
                                                 currentEncodeFaces):

        # Create a bounding box around the face
        # (face_locations returns boxes as (top, right, bottom, left))
        top, right, bottom, left = currentFaceLoc
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)

        # Compare against the known encodings
        match = face_recognition.compare_faces(trainEncodeList,
                                               currentEncodeFace)
        faceDis = face_recognition.face_distance(trainEncodeList,
                                                 currentEncodeFace)

        # Find the position of the minimum distance
        pos = np.argmin(faceDis)
        # If it is a match, print the class name; otherwise print 'Not Match'
        if match[pos]:
            cv2.putText(frame, str(classNames[pos]), (left, top - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 200, 0), 2)
        else:
            cv2.putText(frame, 'Not Match', (left, top - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, .5, (50, 0, 250), 2)

    # * Display
    cv2.imshow('Output', frame)

    # * Exit: press 'ESC' to quit
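The fragment ends before the key handling its last comment announces; a typical ESC check for the enclosing loop would be (sketch):

    if cv2.waitKey(1) & 0xFF == 27:  # 27 is the ESC key code
        break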
Exemple #42
0
    def get_frame(self):
        # Grab a single frame of video
        frame = self.camera.get_frame()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if self.process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            self.face_locations = face_recognition.face_locations(
                rgb_small_frame)
            self.face_encodings = face_recognition.face_encodings(
                rgb_small_frame, self.face_locations)

            self.face_names = []
            for face_encoding in self.face_encodings:
                # See if the face is a match for the known face(s)
                distances = face_recognition.face_distance(
                    self.known_face_encodings, face_encoding)
                min_value = min(distances)

                # tolerance: How much distance between faces to consider it a match. Lower is more strict.
                # 0.6 is typical best performance.
                name = "Unknown"
                if min_value < 0.6:
                    index = np.argmin(distances)
                    name = self.known_face_names[index]

                self.face_names.append(name)

        self.process_this_frame = not self.process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(self.face_locations,
                                                    self.face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

            #if name == 'Unknown':  # blur the face region when it is unknown
            #    ksize = 60
            #    faceblur = frame[top:bottom, left:right]
            #    faceblur = cv2.blur(faceblur, (ksize, ksize))
            #    frame[top:bottom, left:right] = faceblur

            # Draw a label with a name below the face
            # once the name is recognised, fill the box and write the name on it
            #else:
            #    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            #    font = cv2.FONT_HERSHEY_DUPLEX
            #    cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        return frame
Exemple #43
0
	ret,frame = cap.read()

	frame = cv2.flip(frame,1)
	frame = cv2.copyMakeBorder(frame,0,0,100,0,cv2.BORDER_CONSTANT,value=[0,0,0])

	small_frame = cv2.resize(frame, (0,0), fx=0.25, fy=0.25)

	#process_this_frame = True

	faces = []

	face_locations = face_recognition.face_locations(small_frame)
	face_encodings  = face_recognition.face_encodings(small_frame, face_locations)

	for face_en in face_encodings:
		distances = face_recognition.face_distance(student_encodings,face_en)
		index, value = min(enumerate(distances), key=operator.itemgetter(1))
		if(value<=0.47):
			faces.append(students[index][1])
		else:
			faces.append("Unknown")


	print(len(face_encodings), len(faces))

	if(len(faces)>0):
		for (top,right,bottom,left),name in zip(face_locations, faces):
			top = top*4
			right = right*4
			bottom = bottom*4
			left = left*4
Exemple #44
0
bohr_test = face_recognition.load_image_file('./img/faces/bohr2.JPG')
bohr_test = cv2.cvtColor(bohr_test, cv2.COLOR_BGR2RGB)

faceLocshin = face_recognition.face_locations(shin)[0]
encodeshin = face_recognition.face_encodings(shin)[0]
cv2.rectangle(shin, (faceLocshin[3], faceLocshin[0]),
              (faceLocshin[1], faceLocshin[2]), (255, 0, 255), 2)

faceLocbohr = face_recognition.face_locations(bohr)[0]
encodebohr = face_recognition.face_encodings(bohr)[0]
cv2.rectangle(bohr, (faceLocbohr[3], faceLocbohr[0]),
              (faceLocbohr[1], faceLocbohr[2]), (255, 0, 255), 2)

faceLoctest = face_recognition.face_locations(bohr_test)[0]
encodebohrtest = face_recognition.face_encodings(bohr_test)[0]
cv2.rectangle(bohr_test, (faceLoctest[3], faceLoctest[0]),
              (faceLoctest[1], faceLoctest[2]), (255, 0, 255), 2)

results1 = face_recognition.compare_faces([encodebohr], encodebohrtest)
results2 = face_recognition.compare_faces([encodebohr], encodeshin)

faceDis1 = face_recognition.face_distance([encodebohr], encodebohrtest)
faceDis2 = face_recognition.face_distance([encodebohr], encodeshin)

print('Bohr + Bohr test: %s (%.2f%%)' % (results1, (1 - faceDis1[0]) * 100))
print('Bohr + Shin Hae-chul test: %s (%.2f%%)' % (results2, (1 - faceDis2[0]) * 100))

# cv2.imshow('shin', shin)
# cv2.imshow('bohr', bohr)
# cv2.waitKey(0)