Example #1
def itp():
	path = os.path.abspath(os.path.realpath("./Image_data"))
	branch = input("Branch (CS,EE,EC,CE,MM,ME): ")
	path = os.path.join(path, branch)
	semester = input("Enter Semester (I,II,III,IV,V,VI,VII,VIII): ")
	path = os.path.join(path, semester)

	faces = []

	for img in tqdm(os.listdir(path)):
		file_name = os.path.splitext(img)[0].split(' ')[0]
		img_file = cv2.imread(os.path.join(path, img))

		face_location = face_recognition.face_locations(img_file)[0]
		# Reuse the detected location so the image is not scanned twice
		face_encoding = face_recognition.face_encodings(img_file, [face_location])[0]

		faces.append([face_encoding, file_name])

	pkl_path = os.path.join("./students", branch + "_" + semester + ".pkl")
	pkl_path = os.path.abspath(os.path.realpath(pkl_path))

	with open(pkl_path, "wb") as file:
		pickle.dump(faces, file)

	return
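For reference, the pickle written above stores a list of [encoding, registration_name] pairs. A minimal read-back sketch; the branch/semester values here are illustrative assumptions:

import pickle

# Hypothetical values; any branch/semester combination written by itp() works.
with open("./students/CS_I.pkl", "rb") as f:
    faces = pickle.load(f)

for encoding, reg_no in faces:
    print(reg_no, encoding.shape)  # each encoding is a 128-dimensional numpy vector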
Example #2
    def start(self):
        count = 0
        face_locations, face_encodings, face_names = [], [], []
        
        while True:
            count = count + 1
            ret, frame = self.video_capture.read()

            if FAST_PROCESS: 
                ratio = 1.0 / FAST_FRAG
                recog_frame = cv2.resize(frame, (0, 0), fx=ratio, fy=ratio)
            else:
                recog_frame = frame
            rgb_recog_frame = recog_frame[:, :, ::-1]

            if count >= INTERVAL:
                count = 0
                # Note(laofan): I find that the CNN detector is more accurate than HOG, but it costs a lot more computation.
                face_locations = face_recognition.face_locations(
                    rgb_recog_frame,
                    number_of_times_to_upsample=1,
                    model="hog")

                if len(face_locations) > 0:
                    face_encodings = face_recognition.face_encodings(rgb_recog_frame, face_locations)
                    # Reset the names once per processed frame, not once per face,
                    # so every recognized face keeps its name
                    face_names = []
                    for face_encoding in face_encodings:
                        name = "Unknown"
                        matches = self.best_match(self.known_face_encodings, face_encoding, TOLERANCE)
                        if True in matches:
                            first_match_index = matches.index(True)
                            name = self.known_face_names[first_match_index]

                        face_names.append(name)
                else:
                    count = INTERVAL

            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if FAST_PROCESS:
                    top *= FAST_FRAG
                    right *= FAST_FRAG
                    bottom *= FAST_FRAG
                    left *= FAST_FRAG

                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

            cv2.imshow('Video', frame)

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Release the handle to the webcam
        self.video_capture.release()
        cv2.destroyAllWindows()
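The note in this example observes that the CNN detector is more accurate than HOG at a much higher computational cost. Switching is a one-argument change to face_locations; a minimal sketch with a hypothetical input file, noting that "cnn" is only fast with a CUDA-enabled dlib build:

import face_recognition

image = face_recognition.load_image_file("frame.jpg")  # hypothetical snapshot
face_locations = face_recognition.face_locations(
    image,
    number_of_times_to_upsample=1,
    model="cnn")  # more accurate than "hog", much slower on CPU
print(face_locations)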
Example #3
def test_image(image_to_check, tolerance=0.6):
    recognized_faces = []

    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    unknown_image = scale_image(unknown_image)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_landmarks_list = face_recognition.face_landmarks(unknown_image)
    face_locations = face_recognition.face_locations(unknown_image)

    pil_image = Image.fromarray(unknown_image)
    d = ImageDraw.Draw(pil_image)

    if not unknown_encodings:
        # Report that no faces were found in the image
        print_result(image_to_check, "no_persons_found", None)

    else:
        for unknown_encoding, face_landmarks, face_location in zip(unknown_encodings, face_landmarks_list,
                                                                   face_locations):
            distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)

            matched = False
            for distance, name in zip(distances, known_names):
                if distance <= tolerance:
                    matched = True
                    print_result(image_to_check, name, distance)
                    recognized_faces.append(
                        {'name': name, 'dist': distance, 'landmarks': face_landmarks, 'face_location': face_location}
                    )
            if not matched:
                # Report an unknown person once per face, not once per known name
                print_result(image_to_check, "unknown_person", None)

        for item in recognized_faces:
            face_landmarks = item['landmarks']
            face_location = item['face_location']
            # Print the location of each facial feature in this image
            # Let's trace out each facial feature in the image with a line!
            for facial_feature in face_landmarks.keys():
                print("The {} in this face has the following points: {}".format(facial_feature,
                                                                                face_landmarks[facial_feature]))
                d.line(face_landmarks[facial_feature], width=3)

            # Print the location of each face in this image
            top, right, bottom, left = face_location
            print(
                "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom,
                                                                                                      right))
            d.rectangle(((left, top), (right, bottom)), outline=(255, 255, 255), width=4)
            font = ImageFont.truetype("font/arial.ttf", size=30)
            title = item['name']
            text_size = d.textsize(title, font)
            d.text((left, bottom - text_size[1]), title, font=font, fill='white')

    pil_image.save("data/recognition_results/result.jpg")

    return recognized_faces
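test_image() depends on module-level known_face_encodings and known_names, plus helpers such as scale_image() and print_result(). A hedged usage sketch, assuming those are in place; the file and person names are illustrative:

import face_recognition

# Hypothetical reference data populating the module-level globals
known_names = ["alice"]
alice = face_recognition.load_image_file("alice.jpg")
known_face_encodings = [face_recognition.face_encodings(alice)[0]]

for match in test_image("group.jpg", tolerance=0.55):
    print(match['name'], match['dist'])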
Example #4
def extract_faces(fname):
    image = face_recognition.load_image_file(fname)
    face_locations = face_recognition.face_locations(image)
    # Reuse the detected locations so the image is not scanned twice
    face_encodings = face_recognition.face_encodings(image, known_face_locations=face_locations)
    for face_location in face_locations:
        top, right, bottom, left = face_location
        face_image = image[top:bottom, left:right]
        pil_image = PIL.Image.fromarray(face_image)  # cropped face; not saved or returned here
    return {'encodings': face_encodings, 'locations': face_locations}
Example #5
def face_recognition(frame, drawboxes=True):
    """ Perform face recognition using face_recognition package
    """
    global database, facedatabase, facedatabase_encodings, fraction

    # Define standard found state
    found = False

    # Initialize face database if not already initialized
    if (not database) or (not facedatabase) or (not facedatabase_encodings):
        database = list()
        # Search for known faces in faces/ directory
        for (_, _, filenames) in os.walk('faces'):
            database.extend(filenames)
            break
        # Populate face database and generate face encodings
        facedatabase = [fc.load_image_file(os.path.join('faces', name)) for name in database]
        facedatabase_encodings = [fc.face_encodings(face)[0] for face in facedatabase]
    
    # Create a resized copy of the frame in order to speed up processing
    small_frame = cv2.resize(frame, (0, 0), fx=fraction, fy=fraction)

    # Detect faces and generate their encodings
    face_locations = fc.face_locations(small_frame)
    face_encodings = fc.face_encodings(small_frame, face_locations)

    # Recognize faces if found
    if len(face_encodings) > 0:

        found = True

        # Recognize faces and determine their names
        face_names = []
        for face_encoding in face_encodings:
            match = fc.compare_faces(facedatabase_encodings, face_encoding, tolerance=0.5)
            try:
                name = database[match.index(True)].split('.')[0]
            except ValueError:
                name = "Unknown"
            face_names.append(name)
        
        # Draw a rectangle and name around recognized faces if required
        if drawboxes:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if name != "Unknown":
                    top = int((1/fraction)*top - 16)
                    right = int((1/fraction)*right + 16)
                    bottom = int((1/fraction)*bottom + 16)
                    left = int((1/fraction)*left - 16)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left-1, top - 20), (max(right+1, left+12*len(name)), top), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, top - 6), font, 0.5, (255, 255, 255), 1)
    
    # Return frame and found state
    return frame, found
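A minimal driver loop for the function above, assuming face_recognition is imported as fc, a faces/ directory of known people exists, and the module-level globals it relies on are declared as shown (the downscale factor is an assumption):

import cv2

database, facedatabase, facedatabase_encodings = None, None, None
fraction = 0.25  # assumed downscale factor

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame, found = face_recognition(frame, drawboxes=True)
    cv2.imshow('Recognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()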
Example #6
def detect_faces(img):
    '''
    Detect faces in an image.
    :param img: cv::Mat, HxWx3, RGB
    :return: yields one (x, y, w, h) tuple per face
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    for y, right, bottom, x in bbs:
        # Convert (top, right, bottom, left) to (x, y, w, h)
        yield x, y, (right - x), (bottom - y)
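Since detect_faces() is a generator, it is consumed lazily; a hedged usage sketch with a hypothetical input image:

import face_recognition

img = face_recognition.load_image_file("people.jpg")  # hypothetical file
for x, y, w, h in detect_faces(img):
    print("face at x={}, y={}, w={}, h={}".format(x, y, w, h))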
Example #7
def train(train_dir, model_save_path="", n_neighbors=None, knn_algo='ball_tree', verbose=False):
    """
    Trains a k-nearest neighbors classifier for face recognition.

    :param train_dir: directory that contains a sub-directory for each known person, with its name.

     (View in source code to see train_dir example tree structure)

     Structure:
        <train_dir>/
        ├── <person1>/
        │   ├── <somename1>.jpeg
        │   ├── <somename2>.jpeg
        │   ├── ...
        ├── <person2>/
        │   ├── <somename1>.jpeg
        │   └── <somename2>.jpeg
        └── ...
    :param model_save_path: (optional) path to save the model on disk
    :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified.
    :param knn_algo: (optional) underlying data structure to support KNN. Default is ball_tree
    :param verbose: verbosity of training
    :return: returns knn classifier that was trained on the given data.
    """
    X = []
    y = []
    for class_dir in listdir(train_dir):
        if not isdir(join(train_dir, class_dir)):
            continue
        for img_path in image_files_in_folder(join(train_dir, class_dir)):
            image = face_recognition.load_image_file(img_path)
            faces_bboxes = face_locations(image)
            if len(faces_bboxes) != 1:
                if verbose:
                    print("image {} not fit for training: {}".format(img_path, "didn't find a face" if len(faces_bboxes) < 1 else "found more than one face"))
                continue
            X.append(face_recognition.face_encodings(image, known_face_locations=faces_bboxes)[0])
            y.append(class_dir)


    if n_neighbors is None:
        n_neighbors = int(round(sqrt(len(X))))
        if verbose:
            print("Chose n_neighbors automatically as:", n_neighbors)

    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
    knn_clf.fit(X, y)

    if model_save_path != "":
        with open(model_save_path, 'wb') as f:
            pickle.dump(knn_clf, f)
    return knn_clf
Example #8
    def process_image(self, image):
        """Process image."""
        # pylint: disable=import-error
        import face_recognition

        fak_file = io.BytesIO(image)
        fak_file.name = 'snapshot.jpg'
        fak_file.seek(0)

        image = face_recognition.load_image_file(fak_file)
        face_locations = face_recognition.face_locations(image)

        self.process_faces(face_locations, len(face_locations))
Example #9
    def _img_morph(self, img, expression):
        bbs = face_recognition.face_locations(img)
        if len(bbs) > 0:
            y, right, bottom, x = bbs[0]
            bb = x, y, (right - x), (bottom - y)
            face = face_utils.crop_face_with_bb(img, bb)
            face = face_utils.resize_face(face)
        else:
            face = face_utils.resize_face(img)

        morphed_face = self._morph_face(face, expression)

        return morphed_face
Example #10
def get_face(img, target_encodings):
    img = np.array(img)
    locations = face_recognition.face_locations(img, model="cnn")
    if len(locations) == 0:
        return None, None, None, None, None
    encodings = face_recognition.face_encodings(img, locations)
    landmarks = face_recognition.face_landmarks(img, locations)
    if target_encodings is not None:
        # face_distance returns a 1-element array here, so unwrap it to a float
        distances = [float(face_recognition.face_distance([target_encodings], encoding)[0]) for encoding in encodings]
        idx_closest = distances.index(min(distances))
        target_face, target_landmarks = locations[idx_closest], landmarks[idx_closest]
    else:
        target_face, target_landmarks = locations[0], landmarks[0]
    top, right, bottom, left = target_face
    x, y, w, h = left, top, right-left, bottom-top
    return x, y, w, h, target_landmarks
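get_face() returns the face closest to target_encodings, or the first detected face when no target is given; a hedged usage sketch where the target encoding comes from a hypothetical reference photo:

import face_recognition
from PIL import Image

ref = face_recognition.load_image_file("target_person.jpg")  # hypothetical reference
target_encodings = face_recognition.face_encodings(ref)[0]

img = Image.open("scene.jpg")  # hypothetical scene
x, y, w, h, landmarks = get_face(img, target_encodings)
if x is not None:
    print("target face at", (x, y, w, h))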
Example #11
    def reco_faces(self, class_face):
        def match_fono(location_, image_):
            top, right, bottom, left = location_
            # You can access the actual face itself like this:
            face_image = image_[top:bottom, left:right]
            try:
                unknown_face_encoding = face_recognition.face_encodings(face_image, num_jitters=NUM_JITTERS)[0]
                recos = face_recognition.compare_faces(
                    self.known_faces_list, unknown_face_encoding, tolerance=TOLERANCE)
            except IndexError:
                # No encoding could be computed for this crop
                recos = []

            unk = "UNK{n}@unk{n}@00@"
            z = hash(location_)

            face_dict = {
                self.known_faces[index] if is_there else unk.format(n=z): location_ for index, is_there in
                enumerate(recos)}
            # face_dict = {
            #     self.known_faces[index]: location_ for index, is_there in
            #     enumerate(recos) if is_there}
            # print(
            #     "location Locs: {}  Top: {}, Left: {}, Bottom: {}, Right: {}"
            #     .format(face_dict, top, left, bottom, right))
            if any(("@unk" not in name) or ("@Fono-" in name) for name in face_dict.keys()):
                face_dict = {name: location for name, location in face_dict.items()
                             if ("@unk" not in name) or ("@Fono-" in name)}
            else:
                if face_dict.keys():
                    pil_image = Imger.fromarray(face_image)
                    pil_image.save("fono17_2/@Fono-{nome}.png".format(nome=list(face_dict.keys())[0]), "PNG")

            print("location Locs: {}  ".format(face_dict))
            return face_dict

        # Load the jpg file into a numpy array
        image = face_recognition.load_image_file(class_face)

        # Find all the faces in the image using a pre-trained convolutional neural network.
        # This method is more accurate than the default HOG model, but it's slower
        # unless you have an nvidia GPU and dlib compiled with CUDA extensions. But if you do,
        # this will use GPU acceleration and perform well.
        # See also: find_faces_in_picture.py
        face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=NTU, model=MODEL)  # )
        fonos = {fono: locat for locat in face_locations for fono, locati in match_fono(locat, image).items() if fono}
        return fonos
Example #12
def faceRecognitionFromPicture(cvframe):
    print("---- Recognized Started ----")
    small_frame = cv2.resize(cvframe, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    small_rgb_frame = small_frame[:, :, ::-1]

    # get face location
    face_locations = face_recognition.face_locations(small_rgb_frame)
    print("- Face location scan completed")

    face_encodings = face_recognition.face_encodings(
        small_rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(
            known_face_encodings, face_encoding)
        name = "not recognized"  # default name is not recognized

        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]

        face_names.append(name)
    
        
    print("- Face Locations:")
    # print face data
    print(*face_locations, sep='\n')
    print(*face_names, sep='\n')
    print("- Face name searching completed")
    # draw face rectangle and name on current frame
    drawFaceOnImage(cvframe, face_locations, face_names)
    # Label string
    faceNames = ', '.join(face_names)
    count = str(len(face_locations))
    location = ','.join([str(i) for i in face_locations])
    return_string = "\nNames: "+faceNames + \
        "\nFace Count: "+count+"\nLocations: "+location+"\n"
    lblTag["text"] = return_string
    print("---- Recognized Completed ----")
Example #13
    def process(self, image):
        if self.srccode == "opencv":
            if len(image.shape) == 2:
                gray = image
            else:
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = self.opencv_face_detection.detectMultiScale(gray, 1.3, 5)
        elif self.srccode == "dlib":
            # imported here to avoid a package conflict
            from face_recognition import face_locations
            locations = face_locations(image)
            faces = []
            for (t, r, b, l) in locations:
                faces.append((t, l, r - l, b - t))
        # todo: elif self.srccode == 'facenet':
        else:
            print('undefined srccode %s' % self.srccode)
            faces = []
        return faces
Example #14
def find_and_save_face(web_file, face_file):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(web_file)
    print(image.dtype)
    # Find all the faces in the image
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:

        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_file)
Example #15
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. If not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. If not specified, knn_clf must be specified.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
Example #16
    def _extract_faces(self):
        qs_unknown_person = Person.objects.filter(name='unknown')
        if qs_unknown_person.count() == 0:
            unknown_person = Person(name='unknown')
            unknown_person.save()
        else:
            unknown_person = qs_unknown_person[0]

        image = PIL.Image.open(self.thumbnail)
        image = np.array(image.convert('RGB'))

        face_encodings = face_recognition.face_encodings(image)
        face_locations = face_recognition.face_locations(image)
    
        faces = []
        if len(face_locations) > 0:
            for idx_face, (face_encoding, face_location) in enumerate(zip(face_encodings, face_locations)):
                top, right, bottom, left = face_location
                face_image = image[top:bottom, left:right]
                face_image = PIL.Image.fromarray(face_image)

                face = Face()
                face.image_path = self.image_hash + "_" + str(idx_face) + '.jpg'
                face.person = unknown_person
                face.photo = self
                face.location_top = top
                face.location_right = right
                face.location_bottom = bottom
                face.location_left = left
                face.encoding = face_encoding.tobytes().hex()
#                 face.encoding = face_encoding.dumps()

                face_io = BytesIO()
                face_image.save(face_io,format="JPEG")
                face.image.save(face.image_path, ContentFile(face_io.getvalue()))
                face_io.close()
                face.save()
Example #17
def detect_biggest_face(img):
    '''
    Detect the biggest face in an image.
    :param img: cv::Mat, HxWx3, RGB
    :return: an (x, y, w, h) tuple, or None if no face is found
    '''
    # detect faces
    bbs = face_recognition.face_locations(img)

    max_area = float('-inf')
    max_area_i = 0
    for i, (y, right, bottom, x) in enumerate(bbs):
        area = (right - x) * (bottom - y)
        if max_area < area:
            max_area = area
            max_area_i = i

    if max_area != float('-inf'):
        y, right, bottom, x = bbs[max_area_i]
        return x, y, (right - x), (bottom - y)

    return None
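Because detect_biggest_face() returns None when nothing is detected, callers should guard the unpack; a short hedged sketch with a hypothetical input file:

import face_recognition

img = face_recognition.load_image_file("crowd.jpg")  # hypothetical file
bb = detect_biggest_face(img)
if bb is not None:
    x, y, w, h = bb
    print("biggest face:", (x, y, w, h))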
Example #18
def get_face_images(picture_path):
    # Load the jpg file into a numpy array
    image = face_recognition.load_image_file(picture_path)

    # Find all the faces in the image using the default HOG-based model.
    # This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
    # See also: find_faces_in_picture_cnn.py
    face_locations = face_recognition.face_locations(image)

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    for face_location in face_locations:
        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print(
            "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom,
                                                                                                  right))

        # You can access the actual face itself like this:
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        print(picture_path)
        pil_image.save("{0}.jpg".format(picture_path[-7:-3]))
Example #19
def predict(X_img_path, knn_clf=None, model_save_path="", DIST_THRESH=.5):
    """
    recognizes faces in given image, based on a trained knn classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. If not specified, model_save_path must be specified.
    :param model_save_path: (optional) path to a pickled knn classifier. If not specified, knn_clf must be specified.
    :param DIST_THRESH: (optional) distance threshold in knn classification. The larger it is, the higher the chance of misclassifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'N/A' will be passed.
    """

    if not isfile(X_img_path) or splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_save_path == "":
        raise Exception("must supply knn classifier either thourgh knn_clf or model_save_path")

    if knn_clf is None:
        with open(model_save_path, 'rb') as f:
            knn_clf = pickle.load(f)

    X_img = face_recognition.load_image_file(X_img_path)
    X_faces_loc = face_locations(X_img)
    if len(X_faces_loc) == 0:
        return []

    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_faces_loc)


    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)

    is_recognized = [closest_distances[0][i][0] <= DIST_THRESH for i in range(len(X_faces_loc))]

    # predict classes and cull classifications that fall outside the distance threshold
    return [(pred, loc) if rec else ("N/A", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_faces_loc, is_recognized)]
Example #20
def attendance(semester, branch):

    try:
        loc = os.path.join("./students", branch + "_" + semester + ".pkl")
        with open(loc, "rb") as f:
            students = np.array(pickle.load(f))
    except Exception:
        print("Unable to load student image details.")
        print("Please make sure that it exists in the students folder.")
        return set()

    student_encodings = [student[0] for student in students]
    face_locations = []
    cap = cv2.VideoCapture(0)
    present = []
    while True:
        ret, frame = cap.read()

        frame = cv2.flip(frame, 1)
        frame = cv2.copyMakeBorder(frame, 0, 0, 150, 0, cv2.BORDER_CONSTANT, value=[0, 0, 0])

        small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1)

        faces = []

        face_locations = face_recognition.face_locations(small_frame)
        face_encodings = face_recognition.face_encodings(small_frame, face_locations)

        for face_en in face_encodings:
            distances = face_recognition.face_distance(student_encodings, face_en)
            index, difference = min(enumerate(distances), key=operator.itemgetter(1))
            if difference <= 0.5:
                faces.append(students[index][1])
                present.append(students[index][1])
            else:
                faces.append("Unknown")

        if len(faces) > 0:
            for (top, right, bottom, left), regno in zip(face_locations, faces):
                color = (0, 0, 255)
                if regno != "Unknown":
                    color = (0, 255, 0)
                cv2.rectangle(frame, (left, top), (right, bottom), color, 2)

            y = 20
            font = cv2.FONT_HERSHEY_COMPLEX
            for regno in faces:
                if regno != "Unknown":
                    cv2.putText(frame, regno, (20, y), font, 0.5, (255, 255, 255), 1)
                    y = y + 20

        cv2.imshow('FRAS', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
    return set(present)
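Tying this back to Example #1: attendance() loads the ./students/<branch>_<semester>.pkl roster written by itp() and returns the set of registration numbers it saw. A hedged call with illustrative arguments:

present = attendance("I", "CS")  # assumed semester/branch values
print("Present:", sorted(present))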
Example #21
    def process(self, image_search_path, show=True):
        shrink_scale = 2.0
        image_search = cv2.imread(image_search_path)
        image = cv2.resize(image_search, (0, 0), fx=1.0 / shrink_scale, fy=1.0 / shrink_scale)
        face_locations = face_recognition.face_locations(image)
        if len(face_locations) == 0:
            print('cannot find face in image', image_search_path)
            return None

        face_encodings = face_recognition.face_encodings(image, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces(self.face_encodings, face_encoding)
            name = "Unknown"

            print(match.__class__)
            for idx in range(len(match)):
                if match[idx]:
                    name = self.face_names[idx]
            face_names.append(name)

        face_infos = []
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale the face locations back up, since detection ran on a frame shrunk by shrink_scale
            top *= shrink_scale
            right *= shrink_scale
            bottom *= shrink_scale
            left *= shrink_scale

            locations = {'top': int(top), 'right': int(right), 'bottom': int(bottom), 'left': int(left)}
            face_name = {'name': name}

            face_info = {'location': locations, 'name': face_name}
            face_infos.append(face_info)

            left=int(left)
            top=int(top)
            right=int(right)
            bottom=int(bottom)
            # Draw a box around the face
            cv2.rectangle(image, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(image, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            text_org = (left + 6, max(bottom - 6, 0))  # clamp so the label stays inside the image

            print('type(image)',type(image))
            print('type(name)',type(name),name)
            print('type(text_org)',type(text_org))
            print('type(font)',type(font))
            cv2.putText(img=image, text=name, org=text_org, fontFace=font, fontScale=1.0, color=(255, 255, 255), thickness=1)

        return_data = {'image_path': image_search_path, 'face_infos': face_infos, 'labeled_image':image}
        print(return_data)

        if show:
            #cv2.imshow('labeled image',image)
            #cv2.waitKey(0)
            #img = mpimg.imread('stinkbug.png')
            imgplot = plt.imshow(image)
            plt.show()

        return return_data
Example #22
def find_faces(path):
    image = face_recognition.load_image_file(path)
    face_locations = face_recognition.face_locations(image)

    return face_locations
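A quick hedged usage note: face_locations() returns (top, right, bottom, left) tuples in pixel coordinates, so find_faces() can be consumed directly (the file name is illustrative):

for top, right, bottom, left in find_faces("group_photo.jpg"):
    print("face at top={}, left={}".format(top, left))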
Example #23
def detect_faces(frame, model="hog"):
    face_locations = face_recognition.face_locations(frame, model=model)
    landmarks = _raw_face_landmarks(frame, face_locations)

    for (y, right, bottom, x), face_landmarks in zip(face_locations, landmarks):
        yield DetectedFace(frame[y: bottom, x: right], x, right - x, y, bottom - y, face_landmarks)
Example #24
        # Append encodings and name
        known_faces.append(encoding)
        known_names.append(name)


print('Processing unknown faces...')
# Now let's loop over a folder of faces we want to label
for filename in os.listdir(UNKNOWN_FACES_DIR):

    # Load image
    print(f'Filename {filename}', end='')
    image = face_recognition.load_image_file(f'{UNKNOWN_FACES_DIR}/{filename}')

    # This time we first grab face locations - we'll need them to draw boxes
    locations = face_recognition.face_locations(image, model=MODEL)

    # Now that we know the locations, we can pass them to face_encodings as the second argument
    # Without that it would search for faces all over again, slowing down the whole process
    encodings = face_recognition.face_encodings(image, locations)

    # We passed our image through face_locations and face_encodings, so we can modify it
    # First we need to convert it from RGB to BGR as we are going to work with cv2
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    # But this time we assume that there might be more faces in an image - we can find faces of different people
    print(f', found {len(encodings)} face(s)')
    for face_encoding, face_location in zip(encodings, locations):

        # We use compare_faces (but might use face_distance as well)
        # Returns array of True/False values in order of passed known_faces
Example #25
import numpy as np
import cv2
import face_recognition as fr

video_capture = cv2.VideoCapture(0)

# rushabh_face_encoding was missing from the snippet; "rushabh.jpg" is an assumed file name
rushabh_image = fr.load_image_file("rushabh.jpg")
rushabh_face_encoding = fr.face_encodings(rushabh_image)[0]

messi_image = fr.load_image_file("messi.jpg")
messi_face_encoding = fr.face_encodings(messi_image)[0]

known_face_encodings = [rushabh_face_encoding, messi_face_encoding]
known_face_names = ["Rushabh", "Messi"]

while True:
    check, frames = video_capture.read()

    face_locations = fr.face_locations(frames)
    face_encodings = fr.face_encodings(frames, face_locations)

    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):

        matches = fr.compare_faces(known_face_encodings, face_encoding)

        name = "Unknown"

        face_distances = fr.face_distance(known_face_encodings, face_encoding)

        best_match_index = np.argmin(face_distances)

        if matches[best_match_index]:
            name = known_face_names[best_match_index]
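The snippet stops after choosing a name; a hedged continuation, written to match the drawing pattern used by the other webcam examples in this collection (the indentation continues the while loop above):

        # --- continuation sketch; not part of the original snippet ---
        cv2.rectangle(frames, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(frames, name, (left + 6, bottom - 6),
                    cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)

    cv2.imshow('Video', frames)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()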
Example #26
def process_subject(encodings, f_video, dir_out):
    print_video_metadata(f_video)
    video_capture = cv2.VideoCapture(f_video)
    # Check if camera opened successfully
    if not video_capture.isOpened():
        print("Error opening video file")
        return

    frame_id = 0
    # Read until video is completed
    while video_capture.isOpened():
        print(Path(f"fr{frame_id}_face{0}.png"))

        print(dir_out)
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if Path(f"{dir_out}faces/fr{frame_id}_face{0}.png").is_file():
            print("skipping")
            frame_id += 1
            continue
        if ret:
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_frame = frame[:, :, ::-1]
            # Find all the faces in the current frame of video
            face_locations = face_recognition.face_locations(rgb_frame,
                                                             model="cnn")

            # Initialize variables
            # face_locations_all = []
            # frame_id = 0
            # Display the results
            for j, (top, right, bottom, left) in enumerate(face_locations):
                # Draw a box around the face
                face_locations_dict = {
                    "frame": frame_id,
                    "face": j,
                    "bb": (left, top, right, bottom),
                    "landmarks": face_locations[j],
                }

                face_image = crop_detection(Image.fromarray(rgb_frame),
                                            face_locations_dict)
                # try:
                unknown_encoding = face_recognition.face_encodings(
                    np.array(face_image))
                #     cv2.cvtColor(np.array(face), cv2.COLOR_RGB2BGR)
                # )
                if not len(unknown_encoding):
                    continue
                unknown_encoding = unknown_encoding[0]
                results = face_recognition.compare_faces(
                    encodings, unknown_encoding)
                face_image.save(f"{dir_out}faces/fr{frame_id}_face{j}.png")
                pd.to_pickle(
                    unknown_encoding,
                    f"{dir_out}encodings/fr{frame_id}_face{j}-encoding.csv",
                )
                pd.DataFrame(results).astype(int).to_csv(
                    f"{dir_out}predictions/fr{frame_id}_face{j}-predictions.csv",
                    header=None,
                    index=False,
                )
                pd.DataFrame().from_dict(
                    face_locations_dict.items()).T.to_json(
                        f"{dir_out}meta/fr{frame_id}_face{j}-meta.json")

                print(results)
                # except:
                print(f"{dir_out}fr{frame_id}_face{j}.png")
                # finally:

                # cv2.imwrite(f"{dout}fr{frame_id}_face{j}.png", face)
                # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            frame_id += 1
        else:
            break

    # When everything done, release the video capture object
    video_capture.release()
Example #27
unknown_image = face_recognition.load_image_file("./unknown_pics/unknown.jpg")

angel_image = face_recognition.load_image_file("./known_ppl/Angel Gao.jpg")
angel_face_encoding = face_recognition.face_encodings(angel_image)[0]

melissa_image = face_recognition.load_image_file("./known_ppl/Melissa Pan.jpg")
melissa_face_encoding = face_recognition.face_encodings(melissa_image)[0]

known_face_encodings = [angel_face_encoding, melissa_face_encoding]

known_face_names = ["Angel Gao", "Melissa Pan"]

# Find all the faces in the image using the default HOG-based model.
# This method is fairly accurate, but not as accurate as the CNN model and not GPU accelerated.
# See also: find_faces_in_picture_cnn.py
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)

print("I found {} face(s) in this photograph.".format(len(face_locations)))

# for face_location in face_locations:

#     # Print the location of each face in this image
#     top, right, bottom, left = face_location
#     print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

#     # You can access the actual face itself like this:
#     face_image = image[top:bottom, left:right]
#     pil_image = Image.fromarray(face_image)
#     pil_image.show()
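The snippet prepares known_face_encodings and known_face_names but stops before matching; a hedged continuation using the same variables and the first-match pattern seen elsewhere in this collection:

for face_encoding in face_encodings:
    matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
    name = "Unknown"
    if True in matches:
        name = known_face_names[matches.index(True)]
    print(name)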
Example #28
def train(train_dir,
          model_save_path=None,
          n_neighbors=None,
          knn_algo='ball_tree',
          verbose=False):
    """
    Trains a k-nearest neighbors classifier for face recognition.

    :param train_dir: directory that contains a sub-directory for each known person, with its name.

     (View in source code to see train_dir example tree structure)

     Structure:
        <train_dir>/
        ├── <person1>/
        │   ├── <somename1>.jpeg
        │   ├── <somename2>.jpeg
        │   ├── ...
        ├── <person2>/
        │   ├── <somename1>.jpeg
        │   └── <somename2>.jpeg
        └── ...

    :param model_save_path: (optional) path to save model on disk
    :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support KNN. Default is ball_tree
    :param verbose: verbosity of training
    :return: returns knn classifier that was trained on the given data.
    """
    X = []
    y = []

    # Loop through each person in the training set
    for class_dir in os.listdir(train_dir):
        if not os.path.isdir(os.path.join(train_dir, class_dir)):
            continue

        # Loop through each training image for the current person
        for img_path in image_files_in_folder(
                os.path.join(train_dir, class_dir)):
            image = face_recognition.load_image_file(img_path)
            face_bounding_boxes = face_recognition.face_locations(image)

            if len(face_bounding_boxes) != 1:
                # If there are no people (or too many people) in a training image, skip the image.
                if verbose:
                    print("Image {} not suitable for training: {}".format(
                        img_path,
                        "Didn't find a face" if len(face_bounding_boxes) < 1
                        else "Found more than one face"))
            else:
                # Add face encoding for current image to the training set
                X.append(
                    face_recognition.face_encodings(
                        image, known_face_locations=face_bounding_boxes)[0])
                y.append(class_dir)

    # Determine how many neighbors to use for weighting in the KNN classifier
    if n_neighbors is None:
        n_neighbors = int(round(math.sqrt(len(X))))
        if verbose:
            print("Chose n_neighbors automatically:", n_neighbors)

    # Create and train the KNN classifier
    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                             algorithm=knn_algo,
                                             weights='distance')
    knn_clf.fit(X, y)

    # Save the trained KNN classifier
    if model_save_path is not None:
        with open(model_save_path, 'wb') as f:
            pickle.dump(knn_clf, f)

    return knn_clf
Example #29
def face_R(frame):
    face_locations = face_recognition.face_locations(frame)
    face_locations = np.array(face_locations)
    return face_locations
Example #30
def detect_faces(image):
    """ Detects faces in the supplied image file """

    face_locations = face_recognition.face_locations(image)
    return face_locations
Example #31
face_locations = []
face_encodes = []
face_name = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    old = frame
    if ret is False:
        break

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    face_locations = face_recognition.face_locations(
        frame, number_of_times_to_upsample=0, model="cnn")
    face_encodes = face_recognition.face_encodings(frame, face_locations)

    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

    face_name = []

    for face_encode in face_encodes:

        match = face_recognition.compare_faces(known_faces,
                                               face_encode,
                                               tolerance=0.55)
        try:
            face_name.append(names[match.index(True)])
        except ValueError:
            face_name.append("Unknown")  # keep names aligned with face_locations
Example #32
def videotest(filename):
    video_capture = cv2.VideoCapture(filename)
    length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(video_capture.get(cv2.CAP_PROP_FPS))

    sai = face_recognition.load_image_file("2.jpg")
    sfencoding = face_recognition.face_encodings(sai)[0]

    dhoni = face_recognition.load_image_file("1.jpg")
    dfencoding = face_recognition.face_encodings(dhoni)[0]

    virat = face_recognition.load_image_file("3.jpg")
    vfencoding = face_recognition.face_encodings(virat)[0]

    modi = face_recognition.load_image_file("4.jpg")
    mfencoding = face_recognition.face_encodings(modi)[0]

    prateek = face_recognition.load_image_file("prateek.jpg")
    pfencoding = face_recognition.face_encodings(prateek)[0]

    harmesh = face_recognition.load_image_file("harmesh.jpg")
    hfencoding = face_recognition.face_encodings(harmesh)[0]

    vishnu = face_recognition.load_image_file("vishnu.jpg")
    nvencoding = face_recognition.face_encodings(vishnu)[0]

    robert = face_recognition.load_image_file("robertdowney jr.jpg")
    rbjencoding = face_recognition.face_encodings(robert)[0]

    harry = face_recognition.load_image_file("daniel radcliffe.jpg")
    harryencoding = face_recognition.face_encodings(harry)[0]

    hermoine = face_recognition.load_image_file("emma watson.jpg")
    herencoding = face_recognition.face_encodings(hermoine)[0]

    rupert = face_recognition.load_image_file("ront1.jpg")
    ronencoding = face_recognition.face_encodings(rupert)[0]

    known_face_encodings = [
        sfencoding, dfencoding, vfencoding, mfencoding, pfencoding, hfencoding,
        nvencoding, rbjencoding, harryencoding, herencoding, ronencoding
    ]
    known_face_names = [
        "sai", "DHONI", "virat", "modi", "prateek", "harmesh", "vishnu",
        "robert downey jr.", "daniel radcliffe", "emma watson", "rupert grint"
    ]

    width = int(video_capture.get(3))   # CAP_PROP_FRAME_WIDTH
    height = int(video_capture.get(4))  # CAP_PROP_FRAME_HEIGHT
    fourcc = cv2.VideoWriter_fourcc(*'vp80')
    PATH = '/home/saisri/projectcopy_5/project/demo.webm'
    out = cv2.VideoWriter(PATH, fourcc, fps, (width, height))
    for i in range(1, length - 1):

        ret, frame = video_capture.read()
        rgb_frame = frame[:, :, ::-1]
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_frame, face_locations)

        for (top, right, bottom,
             left), face_encoding in zip(face_locations, face_encodings):
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(
                known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]

            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 10), (right, bottom + 10),
                          (10, 10, 10), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 2, bottom), font, 0.4,
                        (255, 255, 255), 1)

        sys.stdout.write(f"\rwriting...{int((i / length) * 100) + 1}%")
        sys.stdout.flush()
        out.write(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
    return PATH
Example #33
def run_single(image_path: str):
    image = face_recognition.load_image_file(image_path)
    face_locations = face_recognition.face_locations(image, model='cnn')
    return face_locations
Example #34
def detect_and_extract(test_image_paths):
    # Loop over the images paths provided.
    idx = 1
    for obj in test_image_paths:
        logging.debug('**********Processing {}'.format(obj['image']))
        for label in obj['labels']:
            # If the object detected is a person...
            if label['name'] == 'person':
                # Read image from disk. 
                img = cv2.imread(obj['image'])
                if img is None:
                    # Bad image was read.
                    logging.error('Bad image was read.')
                    continue

                # Bound the roi using the coord info passed in.
                # The roi is the area around person(s) detected in the image.
                # (x1, y1) is the top-left roi coordinate.
                # (x2, y2) is the bottom-right roi coordinate.
                y1 = int(label['box']['ymin'])
                x1 = int(label['box']['xmin'])
                y2 = int(label['box']['ymax'])
                x2 = int(label['box']['xmax'])
                roi = img[y1:y2, x1:x2]
                if roi.size == 0:
                    # Bad object roi...move on to next image.
                    logging.error('Bad object roi.')
                    continue

                # Detect the (x, y)-coordinates of the bounding boxes corresponding
                # to each face in the input image.
                rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
                detection = face_recognition.face_locations(
                    rgb, NUMBER_OF_TIMES_TO_UPSAMPLE, FACE_DET_MODEL)
                if not detection:
                    # No face detected.
                    logging.debug('No face detected.')
                    if args['save_person_no_face']:
                        # Save extracted person (w/o face) object to disk.
                        obj_img = args['output']+'/'+str(idx)+'-obj'+'.jpg'
                        logging.info('Writing {}'.format(obj_img))
                        cv2.imwrite(obj_img, roi)
                        idx += 1
                    continue

                if args['save_face']:
                    # Carve out and save face roi. 
                    (face_top, face_right, face_bottom, face_left) = detection[0]
                    #cv2.rectangle(rgb, (face_left, face_top), (face_right, face_bottom), (255,0,0), 2)
                    #cv2.imwrite('./face_rgb.jpg', rgb)
                    face_roi = roi[face_top:face_bottom, face_left:face_right]
                    face_img = args['output']+'/'+str(idx)+'-face'+'.jpg'
                    logging.info('Writing {}'.format(face_img))
                    cv2.imwrite(face_img, face_roi)

                if args['save_person_face']:
                    # Save extracted person (w/face) object to disk.
                    obj_img = args['output']+'/'+str(idx)+'-obj'+'.jpg'
                    logging.info('Writing {}'.format(obj_img))
                    cv2.imwrite(obj_img, roi)

                idx += 1
    return
Example #35
def recognize_image(image, known_face_names, known_face_encodings):
    recognized_faces = []

    print("Begining facial detection")
    image_f = face_recognition.load_image_file("./avatar/avatar.png")
    face_locations = face_recognition.face_locations(image_f)
    print("Completed facial detection")
    print("Known Names: ", known_face_names)
    face_encodings = face_recognition.face_encodings(image_f, face_locations)

    # Convert to PIL format
    pil_image = Image.fromarray(image_f)

    # Create a ImageDraw instance
    draw = ImageDraw.Draw(pil_image)
    fontsize = 50
    font = ImageFont.truetype("arial.ttf", fontsize)

    # Loop through faces in test image
    for(top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # matches is list of boolean values representing which faces in known_face_encodings the current is "familiar" with
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding) 
        # print("What is face encodings?: ", face_encoding)
        print("What is matches :???: ", matches)
        # print("is encodings a numpy array?: ", type(face_encoding) )
        #Note: face_encoding is a numpy array of size (128,) 
        name = "Unknown Person"
        diff_list = best_match(face_encoding, known_face_encodings)
        print("Differences: ", diff_list)
        # find index with minimum difference
        min_index = 0
        print("Min_VAl?: ", min_index)
        min_val = diff_list[min_index]
        # find index with minimum
        for dic in range(len(diff_list)):
            if diff_list[dic] < min_val:
                min_index = dic
                min_val = diff_list[dic]
        # only accept if compare_faces also says it is "similar"
        # and make sure the diff is at or under the threshold
        min_threshold = 10
        # if(matches[min_index]):
            # name = known_face_names[min_index]
        if(matches[min_index]) and (min_val <= min_threshold):
            name = known_face_names[min_index]

        if name != "Unknown Person" and name not in recognized_faces:
            recognized_faces.append(name)
        print("WE HAVE RECOGNIZED: ", name)
        print("With diff_value: ", min_val)

        # Draw box
        draw.rectangle(((left, top), (right, bottom)), outline=(255,255,0))
        # Draw label
        text_width, text_height = draw.textsize(name)
        text_height = text_height*4
        draw.rectangle(((left,bottom - text_height - 10), (right, bottom)), fill=(255,255,0), outline=(255,255,0))
        draw.text((left + 6, bottom - text_height - 5), name, font=font, fill=(0,0,0))
        # draw.text((5, 5), name, fill=(0,0,0))

    del draw

    # Uncomment to Display image:
    # pil_image.show()

    # Save image
    pil_image.save('./avatar/detected.png')
    print("\n\n\n\n\n")
    return face_locations, recognized_faces
Example #36
# coding: utf-8

import face_recognition
import cv2
import sys

# reload(sys)
# sys.setdefaultencoding('utf-8')

video_capture = cv2.VideoCapture(0)

while True:
	ret, frame = video_capture.read()

	# Find all the faces and face encodings in the frame of video
	face_locations = face_recognition.face_locations(frame)
	face_encodings = face_recognition.face_encodings(frame, face_locations)

	# Loop through each face in this frame of video
	for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
		name = 'test'
		font = cv2.FONT_HERSHEY_DUPLEX

		# Draw a box around the face
		cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

		# Draw a label with a name below the face
		cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
		cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
	# Display the resulting frame; press 'q' to quit
	cv2.imshow('Video', frame)
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break
Example #37
knownEncodings = []
knownNames = []

for (i, imagePath) in enumerate(imagePaths):
    # extract the person name from the image path
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]

    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image
    boxes = face_recognition.face_locations(rgb)

    # compute the facial embedding for the face
    encodings = face_recognition.face_encodings(rgb, boxes)

    # loop over the encodings
    for encoding in encodings:
        # add each encoding + name to our set of known names and
        # encodings
        knownEncodings.append(encoding)
        knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
print(data)
f = open("/home/pi/MirageSmartMirror/src/faceRecognitionEncodings/encodings",
Example #38
# show output with facial landmarks
cv2.imshow("Landmarks", image)

# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())

# load the input image and convert it from BGR to RGB
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# detect the (x, y) coordinates of the bounding box corresponding to
# each face in the input image and compute facial embeddings for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb, model = args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)

# initialize the list of names of detected faces
names = []

# loop over facial embeddings
for encoding in encodings:
    # compares each face in the input image to our known encodings
    matches = face_recognition.compare_faces(data["encodings"], encoding)
    name = "Unknown"

    # check if a match is found or not
    if True in matches:
        # find the indexes of all matches and initialize a dictionary
        # to count the number of times each match occurs
Example #39
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream
    frame = vs.read()

    # convert the input frame from BGR to RGB then resize it to have
    # a width of 750px (to speed up processing)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    rgb = imutils.resize(rgb, width=750)  # resize the RGB frame, not the original BGR one
    r = frame.shape[1] / float(rgb.shape[1])

    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input frame, then compute
    # the facial embeddings for each face
    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)
    names = []

    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"],
                                                 encoding,
                                                 tolerance=0.4)
        name = "Unknown"

        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces, then initialize a
            # dictionary to count votes for each candidate name
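This snippet also cuts off mid-loop; once the names are resolved, the ratio r computed above is what scales the boxes back to the original frame when drawing, roughly like this (a sketch, not the original continuation):

        names.append(name)

    # (sketch) draw the recognized names, rescaling the boxes by the ratio r
    for ((top, right, bottom, left), name) in zip(boxes, names):
        top, right = int(top * r), int(right * r)
        bottom, left = int(bottom * r), int(left * r)
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(frame, name, (left, top - 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)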
Ejemplo n.º 40
0
def main():
  # Initialize the frame-processing flag
  process_this_frame = True

  while True:
    # Grab a single frame of video
    _, frame = video_capture.read()

    # Skip every other frame to save time
    if process_this_frame:
      # Shrink the frame to 1/4 size in each dimension
      small_frame = cv2.resize(frame, (0,0), fx=0.25, fy=0.25)

      # Find the face locations
      face_locations = face_recognition.face_locations(small_frame)

      # Encode the detected faces
      face_encodings = face_recognition.face_encodings(small_frame, face_locations)

      # Initialize the list of names
      face_names = []

      for face_encoding in face_encodings:
        # Check whether the face matches any registered faces
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding, threshold)
        name = "Unknown"

        # Use the closest registered face as the candidate
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
          name = known_face_names[best_match_index]

        face_names.append(name)

    # Toggle the processing flag
    process_this_frame = not process_this_frame

    # Display the face locations
    # for (top, right, bottom, left) in face_locations:
    for (top, right, bottom, left), name in zip(face_locations, face_names):

      # Scale the coordinates back up to the original frame size
      top *= 4
      right *= 4
      bottom *= 4
      left *= 4

      # Draw a box around the face
      cv2.rectangle(frame, (left, top), (right, bottom), (0,0,255), 2)

      # Draw a label box below the face region
      cv2.rectangle(frame, (left, bottom-35), (right, bottom), (0, 0, 255), cv2.FILLED)
      # font = cv2.FONT_HERSHEY_COMPLEX
      # cv2.putText(frame, name, (left + 6, bottom -6), font, 1.0, (255, 255, 255), 1)

      # Render the name with PIL (needed for Japanese text)
      fontpath = 'klee.ttc'
      font = ImageFont.truetype(fontpath, 32)
      img_pil = Image.fromarray(frame)
      draw = ImageDraw.Draw(img_pil)
      position = (left + 6, bottom - 40)

      # Write the text onto the draw object
      draw.text(position, name, font=font, fill=(255,255,255,0))
      frame = np.array(img_pil)

      # Identity check
      if mode == 1 and name != "Unknown":
        check_password(name)

    # Show the result in the video window
    cv2.imshow('Video', frame)

    # Quit on the ESC key
    if cv2.waitKey(1) == 27:
      break
from PIL import Image
import face_recognition

# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")

# Find all the faces in the image using a pre-trained convolutional neural network.
# This method is more accurate than the default HOG model, but it's slower
# unless you have an NVIDIA GPU and dlib compiled with CUDA extensions. If you do,
# this will use GPU acceleration and perform well.
# See also: find_faces_in_picture.py
face_locations = face_recognition.face_locations(image, number_of_times_to_upsample=0, model="cnn")

print("I found {} face(s) in this photograph.".format(len(face_locations)))

for face_location in face_locations:

    # Print the location of each face in this image
    top, right, bottom, left = face_location
    print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

    # You can access the actual face itself like this:
    face_image = image[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    pil_image.show()
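With a CUDA-enabled GPU, the CNN detector can also process several images per call; a minimal sketch using the library's batch API (the frame list here is illustrative):

import face_recognition

# (sketch) detect faces in a list of frames in GPU batches
frames = [face_recognition.load_image_file("biden.jpg")]  # placeholder list of images
batched_locations = face_recognition.batch_face_locations(
    frames, number_of_times_to_upsample=0, batch_size=128)
for frame_number, face_locations in enumerate(batched_locations):
    print("Frame {}: found {} face(s).".format(frame_number, len(face_locations)))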
 time = time.split(':')                                      # split the time string into hour, minute, second and convert each to an integer
 for i in [0, 1, 2]:
     time[i] = int(time[i])
 hour = time[0]
 minute = time[1]
 if hour >= 10 and hour < 16 and minute <= 5 and hour != 13:  # attendance is taken at the start of every period between 10 am and 4 pm except 1 pm (lunch); each window stays open for 5 minutes
     
     print('attendance is going on')
     if condition == True:
         global attendence
         attendence = np.zeros((1, len(name)))
         condition = False
     date=date.today()
     status,image=v.read()
     if status==True:
         fL=fc.face_locations(image)
         if(len(fL)>0):
             for [x1,y1,x2,y2] in fL:
                 
                 cv.rectangle(image,(y2,x1),(y1,x2),(255,255,255),5)
                 E=fc.face_encodings(image,fL)[0]
                 res=fc.compare_faces(en_data,E)
                 if True in res:
                     font=cv.FONT_HERSHEY_SIMPLEX                                        # draw the frame and name on the image
                     text=name[int(label[res.index(True)])]
                     cv.putText(image,text,(y2,x1),font,0.8,(0,255,0),2,cv.LINE_AA)
                     cv.imshow('my image',image)
                     cv.waitKey(3)
                     attendence[0,int(label[res.index(True)])]=1
                     att_sheet[str(date)+'-'+str(period)]=attendence[0]                      # take attendence[0] because it is a 2D matrix (first row)
                     att_sheet.to_csv(r'C:\Users\dell\AppData\Local\Programs\Python\Python36\name_email_dataset.csv')
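The attendance snippet above depends on names defined elsewhere in its script; a sketch of the assumed setup (every identifier below is inferred from usage and hypothetical):

# (sketch) hypothetical setup for the attendance snippet
import cv2 as cv
import numpy as np
import pandas as pd
import face_recognition as fc
from datetime import date

v = cv.VideoCapture(0)                              # webcam handle
att_sheet = pd.read_csv('name_email_dataset.csv')   # attendance sheet
name = list(att_sheet['name'])                      # registered names (column name assumed)
en_data = []                                        # known face encodings, loaded elsewhere
label = list(range(len(name)))                      # maps each encoding to an index in name
condition = True                                    # reset once per period
period = 1                                          # current period number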
import cv2
import numpy as np
import face_recognition

# Load and encode images
imgElon = face_recognition.load_image_file('ImageBasic/Elon Musk.jpg')
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImageBasic/elon musky test.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)

imgMads = face_recognition.load_image_file(
    'ImageAttendance/Mads Mikkelsen.jpg')
imgMads = cv2.cvtColor(imgMads, cv2.COLOR_BGR2RGB)

# place square around detected face
faceLoc = face_recognition.face_locations(imgElon)[0]
encodeElon = face_recognition.face_encodings(imgElon)[0]
#print(faceLoc)
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]),
              (255, 0, 230), 3)

faceLocTest = face_recognition.face_locations(imgTest)[0]
encodeTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]),
              (faceLocTest[1], faceLocTest[2]), (255, 0, 230), 3)

# compare
results = face_recognition.compare_faces([encodeElon], encodeTest)
faceDis = face_recognition.face_distance([encodeElon], encodeTest)
print(results, faceDis)
cv2.putText(imgTest, f'{results} {round(faceDis[0], 2)}', (50, 50),
            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
Ejemplo n.º 44
0
from PIL import Image
import face_recognition

# Load the jpg file into a numpy array
image = face_recognition.load_image_file("images/abc38.JPG")

face_locations = face_recognition.face_locations(image)

print("I found {} face(s) in this photograph.".format(len(face_locations)))

for face_location in face_locations:

    # Print the location of each face in this image
    top, right, bottom, left = face_location
    print(
        "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}"
        .format(top, left, bottom, right))

    # You can access the actual face itself like this:
    face_image = image[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    pil_image.show()
Ejemplo n.º 45
0
            y2 = int(object['Box']['ymin'])
            x1 = int(object['Box']['xmin'])
            y1 = int(object['Box']['ymax'])
            x2 = int(object['Box']['xmax'])

            roi = img[y2:y1, x1:x2, :]
            if roi.size == 0:
                continue
            #cv2.imshow('roi', roi)
            #cv2.waitKey(0)

            # detect the (x, y)-coordinates of the bounding boxes corresponding
            # to each face in the input image
            # Do not increase upsample. System will crash from out of memory.
            rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
            box = face_recognition.face_locations(
                rgb, number_of_times_to_upsample=1, model=FACE_DET_MODEL)

            # initialize the list of names for each face detected
            names = []

            if not box:
                print('no face detected...skipping face rec')
                names = [None]
            else:
                # Carve out face roi from object roi.
                face_top, face_right, face_bottom, face_left = box[0]
                face_roi = roi[face_top:face_bottom, face_left:face_right, :]
                #cv2.imshow('face roi', face_roi)
                #cv2.waitKey(0)

                # Compute the focus measure of the face
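The example stops at the focus measure; a common choice is the variance of the Laplacian, where low values indicate blur (a sketch of that standard technique, not necessarily what the original used):

                # (sketch) variance-of-Laplacian focus measure
                gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
                focus_measure = cv2.Laplacian(gray, cv2.CV_64F).var()
                if focus_measure < 100:    # blur threshold is an assumption
                    print('face too blurry...skipping face rec')
                    names = [None]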
Ejemplo n.º 46
0
def predict():
	print(known_face_names)
	people_count = len(known_face_encodings)
	# video_capture = cv2.VideoCapture(0)
	f = open('face_encodings.pkl', 'ab')
	f_ = open('face_names.pkl', 'ab')
	pickler = cPickle.Pickler(f)
	pickler_ = cPickle.Pickler(f_)
	# Initialize some variables

	# print(known_face_names)
	face_locations = []
	face_encodings = []
	face_names = []
	process_this_frame = True
	time_to_predict = 1000   #Time it takes to predict
	NI_count = 0
	ucnt = 0
	ncnt = 0
	fin_name = ''
	start = time.time()
	video_capture = cv2.VideoCapture(1)
	while True:
			print("Number of people : ", len(known_face_encodings))
			faceT = {"len": len(known_face_encodings)}
			import json
			import codecs
			"""with open('4forces3.json', 'wb') as f:
				json.dump(faceT, codecs.getwriter('utf-8')(f), ensure_ascii=False)
			testp = path + '../FINAL/src/3.txt'
			print(os.path.exists(testp))
			if(os.path.exists(testp)):
				frame = cv2.imread(path+'../FINAL/src/1.jpg')
			else:
				frame = cv2.imread(path+'../FINAL/src/2.jpg')"""
			# frame = cv2.imread('images.jpg')
			ret, frame = video_capture.read()
			frame = frame.astype('uint8')
			small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
			rgb_small_frame = small_frame[:, :, ::-1]
			#cv2.imshow("temp",rgb_small_frame)
			if process_this_frame:
				# Find all the faces and face encodings in the current frame of video
				face_locations = face_recognition.face_locations(rgb_small_frame)
				face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

				print(face_locations)

				face_names = []
				for face_encoding in face_encodings:
					matches = face_recognition.compare_faces(known_face_encodings, face_encoding , tolerance=0.6)
					name = "Unknown"

					if True in matches:
						first_match_index = matches.index(True)
						name = known_face_names[first_match_index]
					else:
						people_count += 1
						# print(known_face_names)
						# add_to_known(frame)
						known_face_encodings.append(face_encoding)
						pickler.dump(face_encoding)
						known_face_names.append(len(known_face_names))
						pickler_.dump(len(known_face_names))




			process_this_frame = not process_this_frame

			print(face_names)
			print(face_locations)
			# Display the results
			for (top, right, bottom, left), name in zip(face_locations, face_names):
				# Scale back up face locations since the frame we detected in was scaled to 1/4 size
				top *= 4
				right *= 4
				bottom *= 4
				left *= 4
				print("here")
				# Draw a box around the face
				cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 0), 2)
				# Draw a label with a name below the face
				# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255))
				font = cv2.FONT_HERSHEY_DUPLEX
				cv2.putText(frame, name, (left + int((right-left)/2), bottom + 16), font, 0.5, (255, 255, 255), 1)

			# Display the resulting image
			cv2.imshow('Video', frame)
			if len(face_names) == 1:
				if name == 'Unknown':
					ucnt += 1
				else :
					ncnt += 1

				if len(face_names) == 1:
					# print("Predicted user : ", name)
					print("FINAL PREDICTION: ", fin_name)
					print("Accuracy = ", ncnt/(ncnt+ucnt)*100, "%")
				elif len(face_names) > 1:
					print('Too many faces in the frame')
				else:
					print("Could Not Identify face")
				video_capture.release()
				
				cv2.destroyAllWindows()
				break
import cv2
import face_recognition

img1 = face_recognition.load_image_file('obama.jpg')
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img1Test = face_recognition.load_image_file('obama2.jpg')
img1Test = cv2.cvtColor(img1Test, cv2.COLOR_BGR2RGB)

face = face_recognition.face_locations(img1)[0]
print(face)
encodeFace = face_recognition.face_encodings(img1)[0]
print(encodeFace)
cv2.rectangle(img1, (face[3], face[0]), (face[1], face[2]), (255, 0, 255), 2)

faceTest = face_recognition.face_locations(img1Test)[0]
encodeTestFace = face_recognition.face_encodings(img1Test)[0]
cv2.rectangle(img1Test, (faceTest[3], faceTest[0]), (faceTest[1], faceTest[2]), (255, 0, 255), 2)

results = face_recognition.compare_faces([encodeFace], encodeTestFace)
faceDis = face_recognition.face_distance([encodeFace], encodeTestFace)
print(results, faceDis)
cv2.putText(img1Test, f'{results} {round(faceDis[0], 2)}', (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)

cv2.imshow('Obama', img1)
cv2.imshow('Obama Test', img1Test)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ejemplo n.º 48
0
    def play():

        # The original code of this function can be found here: https://github.com/ageitgey/face_recognition

        print("Face Recognition (FR) is started")

        # Fetch images from face database
        images = utils.get_all_image_path_from_db()

        # Get a reference to webcam #0 (the default one)
        video_capture = cv2.VideoCapture(0)

        # Encode images
        known_face_encodings, known_face_names = utils.encode_images(images)

        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True

        unknown_c = 0
        unknown_images = []

        greeted_people = []

        while True:

            if unknown_c == 100:
                unknown_c = 0
                temp_stop = True
            else:
                temp_stop = False

            # Grab a single frame of video
            ret, frame = video_capture.read()

            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]

            unknown_locations = []

            # Only process every other frame of video to save time
            if process_this_frame:

                # Find all the faces and face encodings in the current frame of video
                # -CPU-
                face_locations = face_recognition.face_locations(
                    rgb_small_frame)

                # -GPU-
                # face_locations = face_recognition.face_locations(rgb_small_frame, number_of_times_to_upsample=0, model="cnn")

                face_encodings = face_recognition.face_encodings(
                    rgb_small_frame, face_locations)

                face_names = []
                idx = 0
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(
                        known_face_encodings, face_encoding)
                    name = "Unknown"

                    # # If a match was found in known_face_encodings, just use the first one.
                    # if True in matches:
                    #     first_match_index = matches.index(True)
                    #     name = known_face_names[first_match_index]

                    # Or instead, use the known face with the smallest distance to the new face
                    face_distances = face_recognition.face_distance(
                        known_face_encodings, face_encoding)
                    if len(face_distances) != 0:
                        best_match_index = np.argmin(face_distances)
                        if matches[best_match_index]:
                            name = known_face_names[best_match_index]

                            if name not in greeted_people:
                                modules.TextToSpeech.play("Welcome back " +
                                                          name,
                                                          repeat=False)
                                modules.TextToSpeech.play("Can i help you?",
                                                          repeat=False)
                                greeted_people.append(name)

                    if name == 'Unknown':
                        top, right, bottom, left = face_locations[idx][
                            0], face_locations[idx][1], face_locations[idx][
                                2], face_locations[idx][3]
                        top *= 4
                        right *= 4
                        bottom *= 4
                        left *= 4
                        crop_face = frame[top - config.crop_pad:bottom +
                                          config.crop_pad,
                                          left - config.crop_pad:right +
                                          config.crop_pad]
                        unknown_images.append(crop_face)
                        # cv2.imshow('Unknown', crop_face)
                        if temp_stop:
                            global_utils.show_module_log(
                                "FR - Speak your name!")
                            modules.TextToSpeech.play("What is your name?",
                                                      repeat=False)
                            while True:
                                name_for_unknown = modules.SpeechToText.play(
                                    return_value=True)
                                modules.TextToSpeech.play("Your name is " +
                                                          name_for_unknown +
                                                          ', right?',
                                                          repeat=False)
                                confirm_answer = modules.SpeechToText.play(
                                    return_value=True)
                                if (name_for_unknown !=
                                        '0') and ('yes' in confirm_answer):
                                    global_utils.show_module_log(
                                        "FR - Now i know you! Nice to meet you."
                                    )
                                    modules.TextToSpeech.play(
                                        "Nice to meet you " + name_for_unknown,
                                        repeat=False)
                                    greeted_people.append(name_for_unknown)
                                    break
                                else:
                                    global_utils.show_module_log(
                                        "FR - Try again!")
                                    modules.TextToSpeech.play(
                                        "Say your name again", repeat=False)
                            cv2.imwrite(
                                os.path.join(config.db_path,
                                             name_for_unknown + '.jpg'),
                                crop_face)
                            images = utils.get_all_image_path_from_db()
                            known_face_encodings, known_face_names = utils.encode_images(
                                images)
                            # cv2.destroyWindow('Unknown')

                    face_names.append(name)
                    idx += 1

            if len(unknown_images) != 0:
                unknown_c += 2
            else:
                if unknown_c != 0:
                    unknown_c -= 1
                    cv2.destroyWindow('Unknown')

            process_this_frame = not process_this_frame

            utils.display_bbox_in_image(frame, face_locations, face_names)

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Release handle to the webcam
        video_capture.release()
        cv2.destroyAllWindows()
Ejemplo n.º 49
0
varun_image = fr.load_image_file("varun.jpg")
varun_face_encoding = fr.face_encodings(varun_image)[0]

anerudh_image = fr.load_image_file("anerudh.jpg")
anerudh_face_encoding = fr.face_encodings(anerudh_image)[0]

known_face_encodings = [varun_face_encoding, anerudh_face_encoding]
known_face_names = ["Varun", "Anerudh"]

while True:
    ret, frame = video_capture.read()

    rgb_frame = frame[:, :, ::-1]

    face_locations = fr.face_locations(rgb_frame)
    face_encodings = fr.face_encodings(rgb_frame, face_locations)

    for (top, right, bottom,
         left), face_encoding in zip(face_locations, face_encodings):

        matches = fr.compare_faces(known_face_encodings, face_encoding)

        name = "Unknown"

        face_distances = fr.face_distance(known_face_encodings, face_encoding)

        best_match_index = np.argmin(face_distances)

        if matches[best_match_index]:
            name = known_face_names[best_match_index]
Ejemplo n.º 50
0
def run():
    print("RUN")
    process_this_frame = 0

    notify = notifier()

    timePeriod = 0

    notifyInterval = 600

    video_capture = cv2.VideoCapture(0)

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.2, fy=0.2)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every fifth frame of video to save time
        if process_this_frame == 4:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]
                if False in matches:
                    print("Unknown face detected")
                    video_capture.release()
                    cv2.destroyAllWindows()
                    time.sleep(2)
                    print("Starting thread")
                    t1 = _thread.start_new_thread(run2, (q, ))

                    while q.empty():
                        print("q is empty")
                        continue
                    print("finished thread")
                    video_capture = cv2.VideoCapture(0)

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]

                face_names.append(name)
            process_this_frame = 0

        process_this_frame = process_this_frame + 1

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
Ejemplo n.º 51
0
encMum = fr.face_encodings(mum)[0]
pop = fr.load_image_file(
    '/home/sahgan/Desktop/quarantineStuff/Udemy/ComputerVisionFacialRecognition/images/pop.jpg'
)
encPop = fr.face_encodings(pop)[0]

known_face_encodings = [
    encSah, encAkhil, encShamm, encNiharika, encShru, encMum, encPop
]
known_face_names = ['Sah', 'Akhil', 'Sham', 'Nih', 'Shru', 'Mum', 'Pop']
matchedFaceEnc = []

image_to_recognize = fr.load_image_file(
    '/home/sahgan/Desktop/quarantineStuff/Udemy/ComputerVisionFacialRecognition/images/22birthday.jpg'
)
all_locations = fr.face_locations(image_to_recognize, model='hog')
all_encodings = fr.face_encodings(image_to_recognize, all_locations)
print('There are {} no. of faces in the image'.format(len(all_locations)))
for current_face_location, current_face_encoding in zip(
        all_locations, all_encodings):
    matchedFaceEnc.clear()
    top, right, bottom, left = current_face_location
    print('Found face at top:{}, right:{}, bottom:{}, and left:{}'.format(
        top, right, bottom, left))
    matches = fr.compare_faces(known_face_encodings, current_face_encoding)
    print(matches)
    name_person = 'unknown'
    if True in matches:
        faceDist = fr.face_distance(known_face_encodings,
                                    current_face_encoding)
        index = np.where(faceDist == np.amin(faceDist))
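The snippet ends mid-lookup; the natural completion reads the matched name at that index (a sketch):

        name_person = known_face_names[index[0][0]]
    print('Identified as {}'.format(name_person))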
Ejemplo n.º 52
0
def capture():
    # Get a reference to webcam #0 (the default one)
    video_capture = cv2.VideoCapture(0)

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Rotate 90 degrees
        #frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            # with faceLock:
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                if len(known_face_encodings) == 0:
                    name = "Unknown"
                    face_names.append(name)
                    continue

                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(
                    known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    uploadCapture(frame)

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
    """
    Trains a k-nearest neighbors classifier for face recognition.

    :param train_dir: directory that contains a sub-directory for each known person, with its name.

     (View in source code to see train_dir example tree structure)

     Structure:
        <train_dir>/
        ├── <person1>/
        │   ├── <somename1>.jpeg
        │   ├── <somename2>.jpeg
        │   ├── ...
        ├── <person2>/
        │   ├── <somename1>.jpeg
        │   └── <somename2>.jpeg
        └── ...

    :param model_save_path: (optional) path to save model on disk
    :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
    :param verbose: verbosity of training
    :return: returns knn classifier that was trained on the given data.
    """
    X = []
    y = []

    # Loop through each person in the training set
    for class_dir in os.listdir(train_dir):
        if not os.path.isdir(os.path.join(train_dir, class_dir)):
            continue

        # Loop through each training image for the current person
        for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
            image = face_recognition.load_image_file(img_path)
            face_bounding_boxes = face_recognition.face_locations(image)

            if len(face_bounding_boxes) != 1:
                # If there are no people (or too many people) in a training image, skip the image.
                if verbose:
                    print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
            else:
                # Add face encoding for current image to the training set
                X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
                y.append(class_dir)

    # Determine how many neighbors to use for weighting in the KNN classifier
    if n_neighbors is None:
        n_neighbors = int(round(math.sqrt(len(X))))
        if verbose:
            print("Chose n_neighbors automatically:", n_neighbors)

    # Create and train the KNN classifier
    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
    knn_clf.fit(X, y)

    # Save the trained KNN classifier
    if model_save_path is not None:
        with open(model_save_path, 'wb') as f:
            pickle.dump(knn_clf, f)

    return knn_clf
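A minimal usage sketch for the classifier above (the paths, test image, and 0.6 distance cutoff are assumptions):

# (sketch) train once, then classify faces in a new image
knn_clf = train("knn_examples/train", model_save_path="trained_knn_model.clf")

image = face_recognition.load_image_file("knn_examples/test/unknown.jpg")
boxes = face_recognition.face_locations(image)
encodings = face_recognition.face_encodings(image, known_face_locations=boxes)

if encodings:
    # reject faces whose closest neighbor is farther than the cutoff
    distances, _ = knn_clf.kneighbors(encodings, n_neighbors=1)
    for pred, dist in zip(knn_clf.predict(encodings), distances):
        print(pred if dist[0] <= 0.6 else "unknown")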
Ejemplo n.º 54
0
def main_loop():
    # Get access to the webcam. The method is different depending on if this is running on a laptop or a Jetson Nano.
    if running_on_jetson_nano():
        # Accessing the camera with OpenCV on a Jetson Nano requires gstreamer with a custom gstreamer source string
        video_capture = cv2.VideoCapture(get_jetson_gstreamer_source(),
                                         cv2.CAP_GSTREAMER)
    else:
        # Accessing the camera with OpenCV on a laptop just requires passing in the number of the webcam (usually 0)
        # Note: You can pass in a filename instead if you want to process a video file instead of a live camera stream
        video_capture = cv2.VideoCapture(0)

    # Track how long since we last saved a copy of our known faces to disk as a backup.
    number_of_faces_since_save = 0

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Find all the face locations and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        # Loop through each detected face and see if it is one we have seen before
        # If so, we'll give it a label that we'll draw on top of the video.
        face_labels = []
        for face_location, face_encoding in zip(face_locations,
                                                face_encodings):
            # See if this face is in our list of known faces.
            metadata = lookup_known_face(face_encoding)

            # If we found the face, label the face with some useful information.
            if metadata is not None:
                time_at_door = datetime.now(
                ) - metadata['first_seen_this_interaction']
                face_label = f'At door {int(time_at_door.total_seconds())}s'

            # If this is a brand new face, add it to our list of known faces
            else:
                face_label = 'New visitor!'

                # Grab the image of the face from the current frame of video
                top, right, bottom, left = face_location
                face_image = small_frame[top:bottom, left:right]
                face_image = cv2.resize(face_image, (150, 150))

                # Add the new face to our known face data
                register_new_face(face_encoding, face_image)

            face_labels.append(face_label)

        # Draw a box around each face and label each face
        for (top, right, bottom,
             left), face_label in zip(face_locations, face_labels):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            cv2.putText(frame, face_label, (left + 6, bottom - 6),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)

        # Display recent visitor images
        number_of_recent_visitors = 0
        for metadata in known_face_metadata:
            # If we have seen this person in the last minute, draw their image
            if datetime.now() - metadata['last_seen'] < timedelta(
                    seconds=10) and metadata['seen_frames'] > 5:
                # Draw the known face image
                x_position = number_of_recent_visitors * 150
                frame[30:180,
                      x_position:x_position + 150] = metadata['face_image']
                number_of_recent_visitors += 1

                # Label the image with how many times they have visited
                visits = metadata['seen_count']
                visit_label = f'{visits} visits'
                if visits == 1:
                    visit_label = 'First visit'
                cv2.putText(frame, visit_label, (x_position + 10, 170),
                            cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 1)

        if number_of_recent_visitors > 0:
            cv2.putText(frame, 'Visitors at Door', (5, 18),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1)

        # Display the final frame of video with boxes drawn around each detected face
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            save_known_faces()
            break

        # We need to save our known faces back to disk every so often in case something crashes.
        if len(face_locations) > 0 and number_of_faces_since_save > 100:
            save_known_faces()
            number_of_faces_since_save = 0
        else:
            number_of_faces_since_save += 1

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
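The helpers lookup_known_face, register_new_face, and save_known_faces live elsewhere in this script; a sketch of how the lookup typically works in this pattern (the 0.6 cutoff is the library's usual default; the metadata updates are assumptions consistent with the fields used above):

def lookup_known_face(face_encoding):
    # (sketch) return metadata for the closest known face, or None
    if len(known_face_encodings) == 0:
        return None
    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    best_match_index = np.argmin(face_distances)
    if face_distances[best_match_index] < 0.6:
        metadata = known_face_metadata[best_match_index]
        metadata['last_seen'] = datetime.now()
        metadata['seen_frames'] += 1
        return metadata
    return None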
Ejemplo n.º 55
0
face_locations = []


while(1):
	ret,frame = cap.read()

	frame = cv2.flip(frame,1)
	frame = cv2.copyMakeBorder(frame,0,0,100,0,cv2.BORDER_CONSTANT,value=[0,0,0])

	small_frame = cv2.resize(frame, (0,0), fx=0.25, fy=0.25)

	#process_this_frame = True

	faces = []

	face_locations = face_recognition.face_locations(small_frame)
	face_encodings  = face_recognition.face_encodings(small_frame, face_locations)

	for face_en in face_encodings:
		distances = face_recognition.face_distance(student_encodings,face_en)
		index, value = min(enumerate(distances), key=operator.itemgetter(1))
		if(value<=0.47):
			faces.append(students[index][1])
		else:
			faces.append("Unknown")


	print(len(face_encodings), len(faces))

	if(len(faces)>0):
		for (top,right,bottom,left),name in zip(face_locations, faces):
import face_recognition
import glob

known_faces_encodings = dict()
test_image_encodings = []
quantity_faces = []

for i in glob.glob(
        "/home/alexandr/PycharmProjects/chatbot/base_of_photos/*.jpg"):
    photo = face_recognition.load_image_file(i)
    encodings = face_recognition.face_encodings(photo)
    #print(encodings)
    if encodings:
        known_faces_encodings[i] = encodings[0]

for i in glob.glob("/home/alexandr/PycharmProjects/chatbot/photos/*.jpg"):
    test_image = face_recognition.load_image_file(i)
    test_image_locations = face_recognition.face_locations(test_image)
    #print(test_image_encodings)
    if test_image_locations:
        test_image_encoding = face_recognition.face_encodings(
            test_image, test_image_locations)[0]
        test_image_encodings.append(test_image_encoding)
        quantity_faces.append(test_image_locations)
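The snippet gathers encodings but stops before comparing them; a sketch of matching each test face against the known photos (the 0.6 cutoff is the library default, assumed here):

# (sketch) match each test encoding against the known photo encodings
for test_encoding in test_image_encodings:
    for path, known_encoding in known_faces_encodings.items():
        distance = face_recognition.face_distance([known_encoding], test_encoding)[0]
        if distance <= 0.6:
            print("Match: {} (distance {:.2f})".format(path, distance))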
Ejemplo n.º 57
0
def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # #For Storing the name corresponding to the encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U', delimiter=',')
    else:
        people = []



# MAIN WORK

    #Capture Video indefinitely
    video_capture = cv2.VideoCapture(0)
    # time.delay(2)
    # TODO: GET FROM DATABASE
    # known encodings of persons in database.
    # known_encodings = []
    # people = []

    #Some important variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    # Main loop: process frames from the webcam
    while True:

        # 
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED: 
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame 
        # 
        ret, frame = video_capture.read()

        #smaller frame 1/4th of original size
        small_frame = cv2.resize(frame, (0,0), fx=.25, fy=.25)

        if process_this_frame:
            #Find the face locations
            face_locations = face_recognition.face_locations(small_frame)
            #Find the face encodings 128 Dimensional!!
            face_encodings = face_recognition.face_encodings(small_frame, face_locations)

            face_names=[]
            other = 0 #Count of un-authorised people
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings, face_encoding)
                name = "Unknown"

                #Find if this person is in the present people array
                for i in range(len(match)):
                    if match[i]:
                        name = people[i]
                        break

                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
        
        # Send the names of the people to the parent process
        # os.write(3,b'{"dt" : "This is a test"}')
        print(face_names, flush=True)
            
        process_this_frame = not process_this_frame
        

        #Display the border
        for (top, right, bottom, left),name in zip(face_locations, face_names):
            #Scale up the coordinates by 4 to get face
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            #Assuming person in authenticated
            color =  (0,255,0)  #GREEN
            if not authorised(name):
                #Unauthenticated person
                color = (0,0,255) #RED
                #print so that parent process in Node.js can use it
                print(name,flush=True)

            #Display border
            cv2.rectangle(frame, (left,top), (right,bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left,bottom-35), (right, bottom), color, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name,(left+6, bottom-6), font, 1.0, (255,255,255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Press ESC on the keyboard to quit
        if cv2.waitKey(100) == 27:
            break
            
    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
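The authorised helper used above is defined elsewhere; a minimal sketch (the whitelist is hypothetical):

AUTHORISED_PEOPLE = {'alice', 'bob'}    # hypothetical whitelist

def authorised(name):
    # (sketch) a name is authorised if it belongs to the whitelist
    return name in AUTHORISED_PEOPLE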
def recognize_faces(known_face_encodings, known_face_names, webcam):
    ##    known_face_encodings = ['encodings']
    ##    known_face_names = data['names']
    ##    print (known_face_names)
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []

    video_capture = webcam

    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(
                rgb_small_frame, number_of_times_to_upsample=3)
            face_encodings = face_recognition.face_encodings(
                rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(
                    known_face_encodings, face_encoding)
                name = "Unknown"

                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = known_face_names[first_match_index]

                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                        (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
from PIL import Image
import face_recognition

# Load the jpg file into a numpy array
image = face_recognition.load_image_file("biden.jpg")

# Find all the faces in the image
face_locations = face_recognition.face_locations(image)

print("I found {} face(s) in this photograph.".format(len(face_locations)))

for face_location in face_locations:

    # Print the location of each face in this image
    top, right, bottom, left = face_location
    print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))

    # You can access the actual face itself like this:
    face_image = image[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    pil_image.show()
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # predict the name with the knn classifier (this part adapted from javiershaka)
            name = str(knn_clf.predict([face_encoding])).replace(
                "'", "").replace("[", "").replace("]", "")

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):