Example #1
File: FaceFilter.py, Project: Nioy/faceswap
    def __init__(self, reference_file_paths, nreference_file_paths, threshold=0.6):
        images = list(map(face_recognition.load_image_file, reference_file_paths))
        nimages = list(map(face_recognition.load_image_file, nreference_file_paths))
        # Note: we take only the first face, so each reference file should contain exactly one face.
        self.encodings = list(map(lambda im: face_recognition.face_encodings(im)[0], images))
        self.nencodings = list(map(lambda im: face_recognition.face_encodings(im)[0], nimages))
        self.threshold = threshold
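A minimal usage sketch (the reference paths below are hypothetical): the filter is built from positive and negative reference images, each containing exactly one face.

# Hypothetical usage; "refs/person_a.jpg" and "refs/not_person_a.jpg" are placeholder paths.
face_filter = FaceFilter(["refs/person_a.jpg"], ["refs/not_person_a.jpg"], threshold=0.6)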
Example #2
File: detect.py, Project: cosmonautd/turret
def face_recognition(frame, drawboxes=True):
    """ Perform face recognition using face_recognition package
    """
    global database, facedatabase, facedatabase_encodings, fraction

    # Define standard found state
    found = False

    # Initialize face database if not already initialized
    if (not database) or (not facedatabase) or (not facedatabase_encodings):
        database = list()
        # Search for known faces in faces/ directory
        for (_, _, filenames) in os.walk('faces'):
            database.extend(filenames)
            break
        # Populate face database and generate face encodings
        facedatabase = [fc.load_image_file(os.path.join('faces', name)) for name in database]
        facedatabase_encodings = [fc.face_encodings(face)[0] for face in facedatabase]
    
    # Create a resized copy of the frame in order to speed up processing
    small_frame = cv2.resize(frame, (0, 0), fx=fraction, fy=fraction)

    # Detect faces and generate their encodings
    face_locations = fc.face_locations(small_frame)
    face_encodings = fc.face_encodings(small_frame, face_locations)

    # Recognize faces if found
    if len(face_encodings) > 0:

        found = True

        # Recognize faces and determine their names
        face_names = []
        for face_encoding in face_encodings:
            match = fc.compare_faces(facedatabase_encodings, face_encoding, tolerance=0.5)
            try:
                name = database[match.index(True)].split('.')[0]
            except ValueError:
                name = "Unknown"
            face_names.append(name)
        
        # Draw a rectangle and name around recognized faces if required
        if drawboxes:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if name != "Unknown":
                    top = int((1/fraction)*top - 16)
                    right = int((1/fraction)*right + 16)
                    bottom = int((1/fraction)*bottom + 16)
                    left = int((1/fraction)*left - 16)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cv2.rectangle(frame, (left-1, top - 20), (max(right+1, left+12*len(name)), top), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, top - 6), font, 0.5, (255, 255, 255), 1)
    
    # Return frame and found state
    return frame, found
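A hedged driver sketch for the function above (the camera loop is illustrative and not part of detect.py): it accepts a BGR frame and returns the annotated frame plus a found flag.

# Illustrative single-frame driver; assumes the module globals above are initialized.
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    frame, found = face_recognition(frame, drawboxes=True)
cap.release()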
Example #3
        def match_fono(location_, image_):
            top, right, bottom, left = location_
            # You can access the actual face itself like this:
            face_image = image_[top:bottom, left:right]
            try:
                unknown_face_encoding = face_recognition.face_encodings(face_image, num_jitters=NUM_JITTERS)[0]
                recos = face_recognition.compare_faces(
                    self.known_faces_list, unknown_face_encoding, tolerance=TOLERANCE)
            except IndexError:  # face_encodings found no face in this crop
                recos = []

            unk = "UNK{n}@unk{n}@00@"
            z = hash(location_)

            face_dict = {
                self.known_faces[index] if is_there else unk.format(n=z): location_ for index, is_there in
                enumerate(recos)}
            # face_dict = {
            #     self.known_faces[index]: location_ for index, is_there in
            #     enumerate(recos) if is_there}
            # print(
            #     "location Locs: {}  Top: {}, Left: {}, Bottom: {}, Right: {}"
            #     .format(face_dict, top, left, bottom, right))
            if any(("@unk" not in name) or ("@Fono-" in name) for name in face_dict.keys()):
                face_dict = {name: location for name, location in face_dict.items()
                             if ("@unk" not in name) or ("@Fono-" in name)}
            else:
                if face_dict.keys():
                    pil_image = Imger.fromarray(face_image)
                    pil_image.save("fono17_2/@Fono-{nome}.png".format(nome=face_dict.keys()[0]), "PNG")

            print("location Locs: {}  ".format(face_dict))
            return face_dict
Example #4
File: sort.py, Project: Nioy/faceswap
    def sort_face(self):
        input_dir = self.args.input_dir

        print("Sorting by face similarity...")

        img_list = [[x, face_recognition.face_encodings(cv2.imread(x))]
                    for x in
                    tqdm(self.find_images(input_dir),
                         desc="Loading",
                         file=sys.stdout)]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len - 1),
                      desc="Sorting",
                      file=sys.stdout):
            min_score = float("inf")
            j_min_score = i + 1
            for j in range(i + 1, len(img_list)):
                f1encs = img_list[i][1]
                f2encs = img_list[j][1]
                if f1encs is not None and f2encs is not None and len(
                        f1encs) > 0 and len(f2encs) > 0:
                    score = face_recognition.face_distance(f1encs[0],
                                                           f2encs)[0]
                else:
                    score = float("inf")

                if score < min_score:
                    min_score = score
                    j_min_score = j
            # swap so the closest remaining image moves next to image i
            img_list[i + 1], img_list[j_min_score] = (
                img_list[j_min_score], img_list[i + 1])

        return img_list
Example #5
File: sort.py, Project: Nioy/faceswap
    def sort_face_dissim(self):
        input_dir = self.args.input_dir

        print("Sorting by face dissimilarity...")

        img_list = [[x, face_recognition.face_encodings(cv2.imread(x)), 0]
                    for x in
                    tqdm(self.find_images(input_dir),
                         desc="Loading",
                         file=sys.stdout)]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len), desc="Sorting", file=sys.stdout):
            score_total = 0
            for j in range(0, img_list_len):
                if i == j:
                    continue
                try:
                    score_total += face_recognition.face_distance(
                        [img_list[i][1]],
                        [img_list[j][1]])
                except Exception:
                    # skip pairs where an image has no usable face encoding
                    pass

            img_list[i][2] = score_total

        print("Sorting...")
        img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)
        return img_list
Example #6
def recognize_face(face_file, known_faces_dir):

    os.chdir(known_faces_dir)

    names = []
    encoded_faces = []

    for file in glob.glob("*.jpg"):

        # try to open and read cached data
        # if there is no *.enc file, generate it
        enc_file_name = file[0:-4] + ".enc"
        try:
            encoded = _utils.get_encoded(file, enc_file_name)

        except _utils.FaceNotFoundError:
            continue

        names.append(file)
        encoded_faces.append(encoded)

    unknown_face_file = face_recognition.load_image_file(face_file)

    try:
        unknown_face_encoded = face_recognition.face_encodings(unknown_face_file)[0]
    except IndexError:
        return str()

    results = face_recognition.compare_faces(encoded_faces, unknown_face_encoded)

    for i in range(0, len(results)):
        if results[i]:
            return names[i]

    return str()
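compare_faces returns one boolean per known face, so the loop above returns the first hit. When several references match, the final loop could instead pick the smallest face_distance, a hedged variant of what the source does:

    # Sketch: prefer the closest matching reference over the first boolean match.
    distances = face_recognition.face_distance(encoded_faces, unknown_face_encoded)
    best = distances.argmin()  # face_distance returns a numpy array
    return names[best] if results[best] else str()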
Example #7
File: FaceFilter.py, Project: Nioy/faceswap
    def check(self, detected_face):
        # we could use detected landmarks, but I did not manage to do so. TODO The copy/paste below should help
        encodings = face_recognition.face_encodings(detected_face.image)
        if encodings is not None and len(encodings) > 0:
            # avg is a small mean helper defined elsewhere in FaceFilter.py
            distances = list(face_recognition.face_distance(self.encodings, encodings[0]))
            distance = avg(distances)
            mindistance = min(distances)
            maxdistance = max(distances)
            if distance > self.threshold:
                print("Distance above threshold: %f > %f" % (distance, self.threshold))
                return False
            if len(self.nencodings) > 0:
                ndistances = list(face_recognition.face_distance(self.nencodings, encodings[0]))
                ndistance = avg(ndistances)
                nmindistance = min(ndistances)
                nmaxdistance = max(ndistances)
                if mindistance > nmindistance:
                    print("Distance to negative sample is smaller")
                    return False
                if distance > ndistance:
                    print("Average distance to negative sample is smaller")
                    return False
                # k-nn classifier over the K nearest references (label 1 = positive, 0 = negative)
                K = min(5, min(len(distances), len(ndistances)) + 1)
                N = sum(map(lambda x: x[0],
                            sorted([(1, d) for d in distances] + [(0, d) for d in ndistances],
                                   key=lambda x: x[1])[:K]))
                ratio = N / K
                if ratio < 0.5:
                    print("K-nn is %.2f" % ratio)
                    return False
            return True
        else:
            print("No face encodings found")
            return False
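To make the k-nn vote above concrete, here is a standalone sketch with made-up distances (the numbers are illustrative only):

# Made-up distances to positive (label 1) and negative (label 0) references.
distances = [0.35, 0.52]
ndistances = [0.48, 0.61, 0.70]
K = min(5, min(len(distances), len(ndistances)) + 1)  # K = 3
nearest = sorted([(1, d) for d in distances] + [(0, d) for d in ndistances],
                 key=lambda x: x[1])[:K]              # [(1, 0.35), (0, 0.48), (1, 0.52)]
N = sum(label for label, _ in nearest)                # N = 2
ratio = N / K                                         # 0.67 >= 0.5, so the face is accepted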
Example #8
def itp():
	path = os.path.join("", "./Image_data")
	path = os.path.abspath(os.path.realpath(path))
	branch = input("Branch (CS,EE,EC,CE,MM,ME) : ")
	path = path + "/" + branch
	semester = input("Enter Semester (I,II,III,IV,V,VI,VII,VIII): ")
	path = path + "/" + semester

	faces = []

	for img in tqdm(os.listdir(path)):
		file_name = os.path.splitext(img)[0].split(' ')[0]
		img_file = cv2.imread(os.path.join(path, img))

		face_location = face_recognition.face_locations(img_file)[0]
		face_encoding = face_recognition.face_encodings(img_file, known_face_locations=[face_location])[0]

		faces.append([face_encoding, file_name])

	pkl_path = os.path.join("", "./students/" + branch + "_" + semester + ".pkl")
	pkl_path = os.path.abspath(os.path.realpath(pkl_path))

	with open(pkl_path, "wb") as file:
		pickle.dump(faces, file)

	return
Example #9
File: video.py, Project: DeliangFan/face
    def start(self):
        count = 0
        face_locations, face_encodings, face_names = [], [], []
        
        while True:
            count = count + 1
            ret, frame = self.video_capture.read()

            if FAST_PROCESS: 
                ratio = 1.0 / FAST_FRAG
                recog_frame = cv2.resize(frame, (0, 0), fx=ratio, fy=ratio)
            else:
                recog_frame = frame
            rgb_recog_frame = recog_frame[:, :, ::-1]

            if count >= INTERVAL:
                count = 0
                # Note(laofan), I find that the cnn is better than hogs while costs lots of computation.
                face_locations = face_recognition.face_locations(
                    rgb_recog_frame,
                    number_of_times_to_upsample=1,
                    model="hog")

                if len(face_locations) > 0:
                    face_encodings = face_recognition.face_encodings(rgb_recog_frame, face_locations)
                    face_names = []
                    for face_encoding in face_encodings:
                        name = "Unknown"
                        matches = self.best_match(self.known_face_encodings, face_encoding, TOLERANCE)
                        if True in matches:
                            first_match_index = matches.index(True)
                            name = self.known_face_names[first_match_index]

                        face_names.append(name)
                else:
                    count = INTERVAL

            for (top, right, bottom, left), name in zip(face_locations, face_names):
                if FAST_PROCESS:
                    top *= FAST_FRAG
                    right *= FAST_FRAG
                    bottom *= FAST_FRAG
                    left *= FAST_FRAG

                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

            cv2.imshow('Video', frame)

            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # Release handle to the webcam
        self.video_capture.release()
        cv2.destroyAllWindows()
Example #10
def test_image(image_to_check, tolerance=0.6):
    recognized_faces = []

    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    unknown_image = scale_image(unknown_image)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_landmarks_list = face_recognition.face_landmarks(unknown_image)
    face_locations = face_recognition.face_locations(unknown_image)

    pil_image = Image.fromarray(unknown_image)
    d = ImageDraw.Draw(pil_image)

    if not unknown_encodings:
        # print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None)

    else:
        for unknown_encoding, face_landmarks, face_location in zip(unknown_encodings, face_landmarks_list,
                                                                   face_locations):
            distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)

            for distance, name in zip(distances, known_names):
                if distance <= tolerance:
                    print_result(image_to_check, name, distance)
                    recognized_faces.append(
                        {'name': name, 'dist': distance, 'landmarks': face_landmarks, 'face_location': face_location}
                    )
                else:
                    print_result(image_to_check, "unknown_person", None)

        for item in recognized_faces:
            face_landmarks = item['landmarks']
            face_location = item['face_location']
            # Print the location of each facial feature in this image
            # Let's trace out each facial feature in the image with a line!
            for facial_feature in face_landmarks.keys():
                print("The {} in this face has the following points: {}".format(facial_feature,
                                                                                face_landmarks[facial_feature]))
                d.line(face_landmarks[facial_feature], width=3)

            # Print the location of each face in this image
            top, right, bottom, left = face_location
            print(
                "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom,
                                                                                                      right))
            d.rectangle(((left, top), (right, bottom)), outline=4)
            font = ImageFont.truetype("font/arial.ttf", size=30)
            title = item['name']
            text_size = d.textsize(title, font)
            d.text((left, bottom - text_size[1]), title, font=font, fill='white')

    pil_image.save("data/recognition_results/result.jpg")

    return recognized_faces
Example #11
    def check(self, detected_face):
        # we could use detected landmarks, but I did not manage to do so. TODO The copy/paste below should help
        encodings = face_recognition.face_encodings(detected_face.image)
        if encodings is not None and len(encodings) > 0:
            score = face_recognition.face_distance([self.encoding], encodings[0])[0]
            print(score)
            return score <= self.threshold
        else:
            print("No face encodings found")
            return False
Example #12
File: bench.py, Project: ihowson/ownphotos
def extract_faces(fname):
    image = face_recognition.load_image_file(fname)
    face_encodings = face_recognition.face_encodings(image)
    face_locations = face_recognition.face_locations(image)
    if len(face_locations) > 0:
        for face_location in face_locations:
            top,right,bottom,left = face_location
            face_image = image[top:bottom, left:right]
            pil_image = PIL.Image.fromarray(face_image)
    return {'encodings':face_encodings, 'locations':face_locations}
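Example #13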
def trainFaces():
    print("---- Training Started ----")
    for root, dirs, files in os.walk("./faces"):
        for filename in files:
            file_result = filename.split("_")
            known_face_names.append(file_result[0])
            image = face_recognition.load_image_file("faces/"+filename)
            image_face_encoding = face_recognition.face_encodings(image)[0]
            known_face_encodings.append(image_face_encoding)
            print("Name: " + file_result[0])
    print("---- Training Completed ----")
def train(train_dir, model_save_path="", n_neighbors=None, knn_algo='ball_tree', verbose=False):
    """
    Trains a k-nearest neighbors classifier for face recognition.

    :param train_dir: directory that contains a sub-directory for each known person, with its name.

     (View in source code to see train_dir example tree structure)

     Structure:
        <train_dir>/
        ├── <person1>/
        │   ├── <somename1>.jpeg
        │   ├── <somename2>.jpeg
        │   ├── ...
        ├── <person2>/
        │   ├── <somename1>.jpeg
        │   └── <somename2>.jpeg
        └── ...
    :param model_save_path: (optional) path to save the model on disk
    :param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified.
    :param knn_algo: (optional) underlying data structure to support knn. Default is ball_tree
    :param verbose: verbosity of training
    :return: returns knn classifier that was trained on the given data.
    """
    X = []
    y = []
    for class_dir in listdir(train_dir):
        if not isdir(join(train_dir, class_dir)):
            continue
        for img_path in image_files_in_folder(join(train_dir, class_dir)):
            image = face_recognition.load_image_file(img_path)
            faces_bboxes = face_locations(image)
            if len(faces_bboxes) != 1:
                if verbose:
                    print("image {} not fit for training: {}".format(img_path, "didn't find a face" if len(faces_bboxes) < 1 else "found more than one face"))
                continue
            X.append(face_recognition.face_encodings(image, known_face_locations=faces_bboxes)[0])
            y.append(class_dir)


    if n_neighbors is None:
        n_neighbors = int(round(sqrt(len(X))))
        if verbose:
            print("Chose n_neighbors automatically as:", n_neighbors)

    knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
    knn_clf.fit(X, y)

    if model_save_path != "":
        with open(model_save_path, 'wb') as f:
            pickle.dump(knn_clf, f)
    return knn_clf
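A hedged usage sketch for train() (the paths and n_neighbors value are illustrative; it assumes a train/ directory laid out as in the docstring):

# Illustrative call; "train/" and "knn_model.clf" are placeholder paths.
knn_clf = train("train/", model_save_path="knn_model.clf", n_neighbors=2, verbose=True)
Example #15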
def detect_faces_in_image(file_stream):
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img)
    known_face_encoding = [-0.09634063,  0.12095481, -0.00436332, -0.07643753,  0.0080383,
                            0.01902981, -0.07184699, -0.09383309,  0.18518871, -0.09588896,
                            0.23951106,  0.0986533 , -0.22114635, -0.1363683 ,  0.04405268,
                            0.11574756, -0.19899382, -0.09597053, -0.11969153, -0.12277931,
                            0.03416885, -0.00267565,  0.09203379,  0.04713435, -0.12731361,
                           -0.35371891, -0.0503444 , -0.17841317, -0.00310897, -0.09844551,
                           -0.06910533, -0.00503746, -0.18466514, -0.09851682,  0.02903969,
                           -0.02174894,  0.02261871,  0.0032102 ,  0.20312519,  0.02999607,
                           -0.11646006,  0.09432904,  0.02774341,  0.22102901,  0.26725179,
                            0.06896867, -0.00490024, -0.09441824,  0.11115381, -0.22592428,
                            0.06230862,  0.16559327,  0.06232892,  0.03458837,  0.09459756,
                           -0.18777156,  0.00654241,  0.08582542, -0.13578284,  0.0150229 ,
                            0.00670836, -0.08195844, -0.04346499,  0.03347827,  0.20310158,
                            0.09987706, -0.12370517, -0.06683611,  0.12704916, -0.02160804,
                            0.00984683,  0.00766284, -0.18980607, -0.19641446, -0.22800779,
                            0.09010898,  0.39178532,  0.18818057, -0.20875394,  0.03097027,
                           -0.21300618,  0.02532415,  0.07938635,  0.01000703, -0.07719778,
                           -0.12651891, -0.04318593,  0.06219772,  0.09163868,  0.05039065,
                           -0.04922386,  0.21839413, -0.02394437,  0.06173781,  0.0292527 ,
                            0.06160797, -0.15553983, -0.02440624, -0.17509389, -0.0630486 ,
                            0.01428208, -0.03637431,  0.03971229,  0.13983178, -0.23006812,
                            0.04999552,  0.0108454 , -0.03970895,  0.02501768,  0.08157793,
                           -0.03224047, -0.04502571,  0.0556995 , -0.24374914,  0.25514284,
                            0.24795187,  0.04060191,  0.17597422,  0.07966681,  0.01920104,
                           -0.01194376, -0.02300822, -0.17204897, -0.0596558 ,  0.05307484,
                            0.07417042,  0.07126575,  0.00209804]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama
        match_results = face_recognition.compare_faces([known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
Example #16
def get_encoded(image_file, enc_file_name):

    # try to open and read cached data
    # if there is no *.enc file, generate it

    try:
        enc_file = open(enc_file_name, "rb")
        encoded_raw = enc_file.read()
        enc_file.close()

        if len(encoded_raw) == 0:      # file is empty? Treat it as not existing
            raise FileNotFoundError

        if encoded_raw == b"invalid":  # cache says we could not find face
            raise FaceNotFoundError

        encoded = pickle.loads(encoded_raw)

    except FileNotFoundError:          # no cache file, generate it
        print("Generating cache for file " + image_file)
        image = face_recognition.load_image_file(image_file)
        encodings = face_recognition.face_encodings(image)

        enc_file = open(enc_file_name, "ab")
        if len(encodings) > 0:
            encoded = encodings[0]  # reuse the encodings computed above
            encoded_raw = pickle.dumps(encoded, protocol=0)
            enc_file.write(encoded_raw)
            enc_file.close()
        else:
            print("Could not find face")
            enc_file.write(b"invalid")
            enc_file.close()

            raise FaceNotFoundError

    return encoded
Example #17
def get_face(img, target_encodings):
    img = np.array(img)
    locations = face_recognition.face_locations(img, model="cnn")
    if len(locations) == 0:
        return None, None, None, None, None
    encodings = face_recognition.face_encodings(img, locations)
    landmarks = face_recognition.face_landmarks(img, locations)
    if target_encodings is not None:
        # take the scalar distance to each candidate face
        distances = [face_recognition.face_distance([target_encodings], encoding)[0] for encoding in encodings]
        idx_closest = distances.index(min(distances))
        target_face, target_landmarks = locations[idx_closest], landmarks[idx_closest]
    else:
        target_face, target_landmarks = locations[0], landmarks[0]
    top, right, bottom, left = target_face
    x, y, w, h = left, top, right-left, bottom-top
    return x, y, w, h, target_landmarks
Example #18
File: data.py, Project: Yipsix/haleyVisma
def import_and_train_data():
    sql = '''INSERT INTO PERSONS (name, arr, picture) VALUES(?, ?, ?);'''
    cur = CONN.cursor()
    for picture_file in glob.glob('picturesTraining/*.jpg'):
        name = os.path.splitext(basename(picture_file))[0]
        if not cur.execute("SELECT name FROM persons WHERE name=?", (name, )).fetchone():
            print(name, ' is new... training')
            with open(picture_file, 'rb') as input_file:
                ablob = input_file.read()
                image = face_recognition.load_image_file(picture_file)
                face_encoding = face_recognition.face_encodings(image)[0]
                CONN.execute(sql, [name, face_encoding, sqlite3.Binary(ablob)])
                print('done with: ', name)
        else:
            print(name, ' already exists!')
    CONN.commit()
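Note that face_encoding here is a NumPy array, which sqlite3 cannot bind directly; for the INSERT above to work, an adapter has to be registered beforehand. A minimal sketch (not shown in the original snippet):

import sqlite3
import numpy as np

# Store ndarrays as raw bytes; read them back with np.frombuffer(blob).
sqlite3.register_adapter(np.ndarray, lambda arr: sqlite3.Binary(arr.tobytes()))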
Example #19
def scan_known_people(known_people_folder):
    for _file in image_files_in_folder(known_people_folder):
        file_path = os.path.basename(_file)
        print(file_path)
        basename = os.path.splitext(file_path)[0]
        img = face_recognition.load_image_file(_file)
        encodings = face_recognition.face_encodings(img)

        if len(encodings) > 1:
            print("WARNING: More than one face found in {}. Only considering the first face.".format(_file))

        if len(encodings) == 0:
            print("WARNING: No faces found in {}. Ignoring file.".format(_file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])
            print("{} found in {}".format(basename, file_path))
Example #20
    def reload_images(self, group_method, img_list):
        """
        Reloads the image list by replacing the comparative values with those
        that the chosen grouping method expects.
        :param group_method: str name of the grouping method that will be used.
        :param img_list: image list that has been sorted by one of the sort
        methods.
        :return: img_list but with the comparative values that the chosen
        grouping method expects.
        """
        import_face_recognition()

        input_dir = self.args.input_dir
        print("Preparing to group...")
        if group_method == 'group_blur':
            temp_list = [[x, self.estimate_blur(cv2.imread(x))]
                         for x in
                         tqdm(self.find_images(input_dir), desc="Reloading", file=sys.stdout)]
        elif group_method == 'group_face':
            temp_list = [[x, face_recognition.face_encodings(cv2.imread(x))]
                         for x in
                         tqdm(self.find_images(input_dir), desc="Reloading", file=sys.stdout)]
        elif group_method == 'group_face_cnn':
            import_FaceLandmarksExtractor()
            temp_list = []
            for x in tqdm(self.find_images(input_dir), desc="Reloading", file=sys.stdout):
                d = FaceLandmarksExtractor.extract(cv2.imread(x), 'cnn', True,
                                                   input_is_predetected_face=True)
                temp_list.append([x, np.array(d[0][1]) if len(d) > 0 else np.zeros((68, 2))])
        elif group_method == 'group_face_yaw':
            import_FaceLandmarksExtractor()
            temp_list = []
            for x in tqdm(self.find_images(input_dir), desc="Reloading", file=sys.stdout):
                d = FaceLandmarksExtractor.extract(cv2.imread(x), 'cnn', True,
                                                   input_is_predetected_face=True)
                temp_list.append([x, self.calc_landmarks_face_yaw(np.array(d[0][1]))])
        elif group_method == 'group_hist':
            temp_list = [
                [x, cv2.calcHist([cv2.imread(x)], [0], None, [256], [0, 256])]
                for x in
                tqdm(self.find_images(input_dir), desc="Reloading", file=sys.stdout)
            ]
        else:
            raise ValueError("{} group_method not found.".format(group_method))

        return self.splice_lists(img_list, temp_list)
Example #21
    def __init__(self, imgpath):
        filelist = os.listdir(imgpath)

        self.face_names = []
        self.face_encodings = []
        for f in filelist:
            img = os.path.join(imgpath, f)
            if img.endswith(img_suffix) and os.path.isfile(img):
                image = face_recognition.load_image_file(img)

                if image is None:
                    print('image is None for file ',img)
                    continue

                encodings = face_recognition.face_encodings(image)
                if len(encodings) == 0:
                    print('no face found in file ', img)
                    continue

                self.face_encodings.append(encodings[0])
                face_name = os.path.splitext(f)[0]
                self.face_names.append(face_name)
Example #22
    def __init__(self, camera_entity, faces, name=None):
        """Initialize Dlib face identify entry."""
        # pylint: disable=import-error
        import face_recognition
        super().__init__()

        self._camera = camera_entity

        if name:
            self._name = name
        else:
            self._name = "Dlib Face {0}".format(
                split_entity_id(camera_entity)[1])

        self._faces = {}
        for face_name, face_file in faces.items():
            image = face_recognition.load_image_file(face_file)
            self._faces[face_name] = face_recognition.face_encodings(image)[0]
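Example #23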
def faceRecognitionFromPicture(cvframe):
    print("---- Recognized Started ----")
    small_frame = cv2.resize(cvframe, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    small_rgb_frame = small_frame[:, :, ::-1]

    # get face location
    face_locations = face_recognition.face_locations(small_rgb_frame)
    print("- Face location scan completed")

    face_encodings = face_recognition.face_encodings(
        small_rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(
            known_face_encodings, face_encoding)
        name = "not recognized"  # default name is not recognized

        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]

        face_names.append(name)

    print("- Face Locations:")
    # print face data
    print(*face_locations, sep='\n')
    print(*face_names, sep='\n')
    print("- Face name searching completed")
    # draw face rectangle and name on current frame
    drawFaceOnImage(cvframe, face_locations, face_names)
    # Label string
    faceNames = ''.join(face_names)
    count = str(len(face_locations))
    location = ','.join([str(i) for i in face_locations])
    return_string = "\nNames: "+faceNames + \
        "\nFace Count: "+count+"\nLocations: "+location+"\n"
    lblTag["text"] = return_string
    print("---- Recognized Completed ----")
def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6):
    """
    Recognizes faces in given image using a trained KNN classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_path must be specified.
    :param model_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
    :param distance_threshold: (optional) distance threshold for face classification. the larger it is, the more chance
           of mis-classifying an unknown person as a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'unknown' will be returned.
    """
    if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("Invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_path is None:
        raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")

    # Load a trained KNN model (if one was passed in)
    if knn_clf is None:
        with open(model_path, 'rb') as f:
            knn_clf = pickle.load(f)

    # Load image file and find face locations
    X_img = face_recognition.load_image_file(X_img_path)
    X_face_locations = face_recognition.face_locations(X_img)

    # If no faces are found in the image, return an empty result.
    if len(X_face_locations) == 0:
        return []

    # Find encodings for faces in the test image
    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)

    # Use the KNN model to find the best matches for the test face
    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
    are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]

    # Predict classes and remove classifications that aren't within the threshold
    return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
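A minimal usage sketch for predict() (the image and model paths are hypothetical):

# Hypothetical paths; prints one (name, bounding box) pair per detected face.
for name, (top, right, bottom, left) in predict("test/group.jpg", model_path="knn_model.clf"):
    print("Found {} at ({}, {})".format(name, left, top))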
Example #25
    def __init__(self, camera_entity, faces, name=None):
        """Initialize Dlib face identify entry."""
        # pylint: disable=import-error
        import face_recognition
        super().__init__()

        self._camera = camera_entity

        if name:
            self._name = name
        else:
            self._name = "Dlib Face {0}".format(
                split_entity_id(camera_entity)[1])

        self._faces = {}
        for face_name, face_file in faces.items():
            try:
                image = face_recognition.load_image_file(face_file)
                self._faces[face_name] = \
                    face_recognition.face_encodings(image)[0]
            except IndexError as err:
                _LOGGER.error("Failed to parse %s. Error: %s", face_file, err)
Example #26
File: models.py, Project: ihowson/ownphotos
    def _extract_faces(self):
        qs_unknown_person = Person.objects.filter(name='unknown')
        if qs_unknown_person.count() == 0:
            unknown_person = Person(name='unknown')
            unknown_person.save()
        else:
            unknown_person = qs_unknown_person[0]

        image = PIL.Image.open(self.thumbnail)
        image = np.array(image.convert('RGB'))

        face_encodings = face_recognition.face_encodings(image)
        face_locations = face_recognition.face_locations(image)
    
        faces = []
        if len(face_locations) > 0:
            for idx_face, face in enumerate(zip(face_encodings,face_locations)):
                face_encoding = face[0]
                face_location = face[1]
                top,right,bottom,left = face_location
                face_image = image[top:bottom, left:right]
                face_image = PIL.Image.fromarray(face_image)

                face = Face()
                face.image_path = self.image_hash+"_"+str(idx_face)+'.jpg'
                face.person = unknown_person
                face.photo = self
                face.location_top = face_location[0]
                face.location_right = face_location[1]
                face.location_bottom = face_location[2]
                face.location_left = face_location[3]
                face.encoding = face_encoding.tobytes().hex()
#                 face.encoding = face_encoding.dumps()

                face_io = BytesIO()
                face_image.save(face_io,format="JPEG")
                face.image.save(face.image_path, ContentFile(face_io.getvalue()))
                face_io.close()
                face.save()
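Since the encoding is stored as a hex string of the raw array bytes, it can be recovered later with NumPy (a sketch, assuming the default float64 dtype of the 128-d dlib encodings):

import numpy as np

# Inverse of face_encoding.tobytes().hex(); the dtype must match the stored array.
face_encoding = np.frombuffer(bytes.fromhex(face.encoding), dtype=np.float64)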
Example #27
    def process_image(self, image):
        """Process image."""
        # pylint: disable=import-error
        import face_recognition

        fak_file = io.BytesIO(image)
        fak_file.name = 'snapshot.jpg'
        fak_file.seek(0)

        image = face_recognition.load_image_file(fak_file)
        unknowns = face_recognition.face_encodings(image)

        found = []
        for unknown_face in unknowns:
            for name, face in self._faces.items():
                result = face_recognition.compare_faces([face], unknown_face)
                if result[0]:
                    found.append({
                        ATTR_NAME: name
                    })

        self.process_faces(found, len(unknowns))
Example #28
File: entry.py, Project: DeliangFan/face
    def load_image(self, file_path, name):
        if not os.path.exists(file_path):
            print("Image file not existed")
            return -1

        image_hash = self.compute_hash(file_path)
        if self.store.get_face_by_hash(image_hash):
            print("Face already recorded.")
            return -2

        try:
            image = face_recognition.load_image_file(file_path)
            face_encoding = face_recognition.face_encodings(image)[0]
        except Exception:
            print("Failed to recognition face")
            return -3

        face = {
            "name": name,
            "hash": image_hash,
            "face_encoding": list(face_encoding)
        }

        self.store.create_face(face)
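Example #29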
def predict(X_img_path, knn_clf=None, model_save_path="", DIST_THRESH=.5):
    """
    recognizes faces in given image, based on a trained knn classifier

    :param X_img_path: path to image to be recognized
    :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified.
    :param model_save_path: (optional) path to a pickled knn classifier. if not specified, knn_clf must be specified.
    :param DIST_THRESH: (optional) distance threshold in knn classification. the larger it is, the more chance of misclassifying an unknown person to a known one.
    :return: a list of names and face locations for the recognized faces in the image: [(name, bounding box), ...].
        For faces of unrecognized persons, the name 'N/A' will be passed.
    """

    if not isfile(X_img_path) or splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
        raise Exception("invalid image path: {}".format(X_img_path))

    if knn_clf is None and model_save_path == "":
        raise Exception("must supply knn classifier either thourgh knn_clf or model_save_path")

    if knn_clf is None:
        with open(model_save_path, 'rb') as f:
            knn_clf = pickle.load(f)

    X_img = face_recognition.load_image_file(X_img_path)
    X_faces_loc = face_locations(X_img)
    if len(X_faces_loc) == 0:
        return []

    faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_faces_loc)


    closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)

    is_recognized = [closest_distances[0][i][0] <= DIST_THRESH for i in range(len(X_faces_loc))]

    # predict classes and cull classifications that are not with high confidence
    return [(pred, loc) if rec else ("N/A", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_faces_loc, is_recognized)]
Example #30
        print(myDataList)


encodeListKnown = findEncodings(images)
print('Encoding complete...')

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    imgs = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)

    #location in current frame
    facesCurFrame = face_recognition.face_locations(imgs)
    encodeCurFrame = face_recognition.face_encodings(imgs, facesCurFrame)

    #finding the matches
    for encodeFace, faceLoc in zip(encodeCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDistance = face_recognition.face_distance(encodeListKnown,
                                                      encodeFace)
        print(faceDistance)
        matchIndex = np.argmin(faceDistance)

        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            print(name)
            y1, x2, y2, x1 = faceLoc
            # scale locations back up: detection ran on a quarter-size frame
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0),
                          cv2.FILLED)
Example #31
# Twilio Account SID and Auth Token from the Twilio console
account_sid = '' #enter account sid here
auth_token = '' #enter account token here
client = Client(account_sid, auth_token)

# Camera 0 is the integrated web cam on my netbook
camera_port = 0
 
#Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 30

#loads known image of user
##paste path to a pic of your face
User_image = face_recognition.load_image_file("file path")
User_face_encoding = face_recognition.face_encodings(User_image)[0]


# Now we can initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)
 
# Captures a single image from the camera and returns it in PIL format
def get_image():
    # read is the easiest way to get a full image out of a VideoCapture object.
    retval, im = camera.read()
    return im

def talk(audio):
    # voice agent
    print(audio)
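Example #32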
            images.append(img)
            name=filename.split(".")
            labels.append(name[0])
            tot_images=tot_images+1
    return images,labels,tot_images

imgs,labels,totals=load_images_from_folder(sys.argv[1])

counter=0
for image in imgs:
    
    imageblack=cv2.cvtColor(image, cv2.COLOR_RGB2GRAY )
    facecascade=cv2.CascadeClassifier("/Users/r17935avinash/anaconda3/pkgs/opencv3-3.1.0-py35_0/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
    faces=facecascade.detectMultiScale(imageblack,1.3,4)
    [(x1,y1,w1,h1)]=faces
    bachan_encoding.append(np.array(face_recognition.face_encodings(image[y1:y1+h1,x1:x1+w1])))
    cv2.rectangle(image,(x1,y1),(x1+w1,y1+h1),(255,0,0),4)
    print("Image",counter+1,"encoded. Label:",labels[counter])
    counter = counter + 1
    #plt.imshow(image)
    #plt.xlabel(labels)
    plt.show()
    
with open(sys.argv[1]+'/image_encodings','wb') as file1:
    pickle.dump(bachan_encoding,file1)
with open(sys.argv[1]+'/labels','wb') as file2:
    pickle.dump(labels,file2)

print("Successfully trained")
import cv2
import numpy as np
import face_recognition

imgElon = face_recognition.load_image_file('ImagesBasic/Elon Musk.jpg')
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file('ImagesBasic/Bill gates.jpg')
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)

faceLoc = face_recognition.face_locations(imgElon)[0]
encodeElon = face_recognition.face_encodings(imgElon)[0]
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]),
              (255, 0, 255), 2)
print(faceLoc)

faceLocTest = face_recognition.face_locations(imgTest)[0]
encodeTest = face_recognition.face_encodings(imgTest)[0]
cv2.rectangle(imgTest, (faceLocTest[3], faceLocTest[0]),
              (faceLocTest[1], faceLocTest[2]), (255, 0, 255), 2)

results = face_recognition.compare_faces([encodeElon], encodeTest)
faceDis = face_recognition.face_distance([encodeElon], encodeTest)
cv2.putText(imgTest, f'{results} {round(faceDis[0],2)} ', (50, 50),
            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 255), 3)

cv2.imshow('Elon Musk', imgElon)
cv2.imshow('Elon Test', imgTest)
cv2.waitKey(0)
Example #34
def run_recognize(cameraId, scaleFactor, minSizeTuple, tolerance, minNeighbour,
                  apiService, runMode, showDetailInfo):
    try:
        global whoIsLocked, inActionLock
        timeOfLock = None
        currentPerson = None
        alpha = 1.20  # Contrast control (1.0-3.0)
        beta = 21  # Brightness control (0-100)

        # Buttons to GPIO pins (physical numbering)
        buttonStart = 11
        buttonBreak = 21
        buttonTask = 22
        buttonEnd = 24
        buzzerPin = 13

        bounceTime = 230  # Used when setting up events as bounce prevent time
        buzzerDutyCycle = 0.7
        lockInTime = 6.5  # How much time user has to choose action

        display = lcddriver.lcd()  # My display has 16 characters maximum; 2 lines

        GPIO.setmode(GPIO.BOARD)  # Use physical pin numbering
        GPIO.setup(buttonStart, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(buttonEnd, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(buttonBreak, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(buttonTask, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(buzzerPin, GPIO.OUT)
        buzzer = GPIO.PWM(buzzerPin, 1200)

        def map_button_to_eventId(button):
            if button == buttonStart:
                return 1
            elif button == buttonBreak:
                return 2
            elif button == buttonTask:
                return 3
            elif button == buttonEnd:
                return 4
            else:
                raise Exception(
                    f'Button not mapped to any event. GPIO pin: {button}')

        def event_callback(button):
            # Setting up globals
            global whoIsLocked, inActionLock, lastPersonEntry

            if inActionLock:
                buzzer_quick_alert(buzzer, buzzerDutyCycle)
                print(
                    f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Action prevented, ongoing action.'
                )
                return
            if whoIsLocked is None:
                buzzer_quick_alert(buzzer, buzzerDutyCycle)
                print(
                    f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Action prevented, nobody in lock.'
                )
                return

            actionTime = getCurrentTime()

            # To prevent button bouncing
            #if (lastPersonEntry is not None and
            #    lastPersonEntry.time+9 > actionTime):
            #    # remove print in final version
            #    if showDetailInfo:
            #        print('[INFO] Bounce prevented (not enough time passed between actions.')
            #    return

            eventId = map_button_to_eventId(button)

            # Prevent bouncing if last person = current person in X seconds timeframe
            if (lastPersonEntry is not None
                    and lastPersonEntry.personId == whoIsLocked[0]
                    and lastPersonEntry.eventId == eventId
                    and actionTime < lastPersonEntry.time + 20):
                if showDetailInfo:
                    print(
                        f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Action prevented. Same person, same action. Minimum time has not passed. Time remaining is {(round(lastPersonEntry.time+20 - actionTime, 1))}s.'
                    )
                return

            inActionLock = True  # Running the command, no interrupts
            display.lcd_clear()

            # This is new last person
            lastPersonEntry = LastPersonEntry(actionTime, eventId,
                                              whoIsLocked[0])

            if whoIsLocked[0] is None:  # id is None which means user is Unknown
                print(
                    f'[{strftime("%m-%d %H:%M:%S", getLocalTime())}] Message -> Person not recognized, please look at camera and try again.'
                )
                response = apiService.post_action(None, eventId)
                if response.serverError:
                    print(
                        f'[{strftime("%m-%d %H:%M:%S", getLocalTime())}] [ERROR] Server error.'
                    )
                display.lcd_display_string("Not recognized", 1)
                display.lcd_display_string("Please try again", 2)
                buzzer_error(buzzer, buzzerDutyCycle)
            else:  # User is known
                response = apiService.post_action(whoIsLocked[0], eventId)
                if showDetailInfo:
                    print(
                        f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] User  id is -> {whoIsLocked[0]}'
                    )
                if not response.serverError:
                    if response.message is not None:
                        print(
                            f'[{strftime("%m-%d %H:%M:%S", getLocalTime())}] Message -> {response.message}'
                        )
                        if response.messageCode == 1:
                            display.lcd_display_string("  Work already", 1)
                            display.lcd_display_string("    started", 2)
                            buzzer_error(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 2:
                            display.lcd_display_string("    Work not", 1)
                            display.lcd_display_string("    started", 2)
                            buzzer_error(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 3:
                            display.lcd_display_string("   Break not", 1)
                            display.lcd_display_string("     closed", 2)
                            buzzer_error(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 4:
                            display.lcd_display_string("Welcome", 1)
                            display.lcd_display_string(whoIsLocked[1], 2)
                            buzzer_ok(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 5:
                            display.lcd_display_string("Have fun", 1)
                            display.lcd_display_string(whoIsLocked[1], 2)
                            buzzer_ok(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 6:
                            display.lcd_display_string("Stay safe", 1)
                            display.lcd_display_string(whoIsLocked[1], 2)
                            buzzer_ok(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 7:
                            display.lcd_display_string("Goodbye", 1)
                            display.lcd_display_string(whoIsLocked[1], 2)
                            buzzer_ok(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 8:
                            display.lcd_display_string("Welcome back", 1)
                            display.lcd_display_string(whoIsLocked[1], 2)
                            buzzer_ok(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 9:
                            display.lcd_display_string("  Official AB.", 1)
                            display.lcd_display_string("   not closed", 2)
                            buzzer_error(buzzer, buzzerDutyCycle)
                        elif response.messageCode == 10:
                            display.lcd_display_string("Not recognized", 1)
                            display.lcd_display_string("Please try again", 2)
                            buzzer_error(buzzer, buzzerDutyCycle)
                            if showDetailInfo:
                                print('[WARNING] Message code 10 appeared.')
                        else:
                            display.lcd_display_string("Unknown message", 1)
                            display.lcd_display_string("      code", 2)
                else:
                    display.lcd_display_string("  Server error", 1)
                    buzzer_error(buzzer, buzzerDutyCycle)
            sleep(3.9)  # Shows lcd text and locks actions for time
            display.lcd_clear()
            inActionLock = False

        GPIO.add_event_detect(buttonStart,
                              GPIO.RISING,
                              callback=lambda x: event_callback(buttonStart),
                              bouncetime=bounceTime)
        GPIO.add_event_detect(buttonEnd,
                              GPIO.RISING,
                              callback=lambda x: event_callback(buttonEnd),
                              bouncetime=bounceTime)
        GPIO.add_event_detect(buttonBreak,
                              GPIO.RISING,
                              callback=lambda x: event_callback(buttonBreak),
                              bouncetime=bounceTime)
        GPIO.add_event_detect(buttonTask,
                              GPIO.RISING,
                              callback=lambda x: event_callback(buttonTask),
                              bouncetime=bounceTime)

        print(
            f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Loading encodings from file.'
        )
        try:
            # Using absolute path, take caution
            data = pickle.loads(
                open(
                    '/home/pi/Desktop/face_recognition_for_attendance_rpi/encodings.pickle',
                    'rb').read())
        except Exception as e:
            print(f'[ERROR] No faces in the model. Error: {e}')
            raise Exception('Error on loading pickle file.')

        detector = cv2.CascadeClassifier(cv2.data.haarcascades +
                                         'haarcascade_frontalface_default.xml')
        print(
            f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Starting video stream, press "q" to exit.'
        )
        vs = VideoStream(src=cameraId).start()
        sleep(1.3)  # Warm up

        while True:
            thisFrameTime = getCurrentTime()

            frame = vs.read()
            # choose lower width for performance
            frame = resize(frame, width=700)
            # increase brightness and contrast for a bit
            frame = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)

            # 1) BGR to grayscale: for face detection
            # 2) BGR to RGB: for face recognition
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            # grayscale frame
            # detectMultiScale autoconverts to greyscale if not in greyscale
            rects = detector.detectMultiScale(gray,
                                              scaleFactor=scaleFactor,
                                              minNeighbors=minNeighbour,
                                              minSize=minSizeTuple,
                                              flags=cv2.CASCADE_SCALE_IMAGE)

            # prepare coordinates for face_recognition function
            boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

            # get biggest box
            biggestBoxInList = getBiggestBoxInList(boxes)

            encodings = face_recognition.face_encodings(rgb, biggestBoxInList)
            names = []
            for encoding in encodings:
                matches = face_recognition.compare_faces(data['encodings'],
                                                         encoding,
                                                         tolerance=tolerance)
                name = (None, 'Unknown')
                if True in matches:
                    matchedIds = [i for (i, b) in enumerate(matches) if b]
                    counts = {}
                    for i in matchedIds:
                        name = data['names'][i]
                        counts[name] = counts.get(name, 0) + 1
                    name = max(counts, key=counts.get)

                    # split id and name
                    splitName = name.split(' ', 1)
                    # set name to tuple (id, user)
                    name = (splitName[0], splitName[1])
                names.append(name)

            if len(names) > 0:
                currentPerson = names[0]  # Pick first and only from array

                if whoIsLocked is None and inActionLock == False:
                    # prepare name because display has 16 chars max
                    if (currentPerson[0] is not None
                            and len(currentPerson[1]) > 16):
                        currentPerson = (currentPerson[0],
                                         currentPerson[1][0:16])

                    display.lcd_clear()
                    display.lcd_display_string("Choose input", 1)
                    display.lcd_display_string(currentPerson[1], 2)
                    timeOfLock = thisFrameTime
                    # Setting variable to tuple (id/None,user/Unknown)
                    whoIsLocked = currentPerson
            else:
                currentPerson = None

            if whoIsLocked is not None and not inActionLock:
                # first check: if the initial lock-on was on Unknown but we now see a real user, re-lock onto the real user
                # second check: give the user enough time to choose an input
                if (whoIsLocked[0] is None and currentPerson is not None
                        and currentPerson[0] is not None):
                    whoIsLocked = None
                    display.lcd_clear()
                    timeOfLock = thisFrameTime  # refresh time of lock
                    display.lcd_display_string("Choose input", 1)
                    display.lcd_display_string(currentPerson[1], 2)
                    sleep(0.1)  # delay a bit to let the display fully load the characters
                    whoIsLocked = currentPerson
                elif timeOfLock + lockInTime < thisFrameTime:
                    whoIsLocked = None
                    display.lcd_clear()

            # This is used just to show who is locked in on video feedback
            if runMode == 1 and whoIsLocked is not None:
                timeLeft = round(timeOfLock + lockInTime - thisFrameTime, 1)
                if timeLeft > 0:  # show the countdown while lock-in time remains
                    # WhoIsLocked[1] is name/Unknown
                    cv2.putText(frame, f'{whoIsLocked[1]} ({timeLeft}s)',
                                (38, 38), cv2.FONT_HERSHEY_SIMPLEX, 1.5,
                                (0, 0, 255), 3)

            # This is for drawing on screen and can be disabled if no display
            if runMode == 1:
                for ((top, right, bottom, left),
                     name) in zip(biggestBoxInList, names):
                    cv2.rectangle(frame, (left, top), (right, bottom),
                                  (0, 255, 0), 2)
                    y = top - 15 if top - 15 > 15 else top + 15
                    cv2.putText(frame, name[1], (left, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                # display video
                cv2.imshow('Camera', frame)

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                break

    finally:
        # cleanup
        cv2.destroyAllWindows()
        vs.stop()
        buzzer.stop()
        display.lcd_clear()
        GPIO.cleanup()
        print(
            f'[INFO] [{strftime("%m-%d %H:%M:%S", getLocalTime())}] Recognizer finished.'
        )
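Note: the loop above calls a getBiggestBoxInList() helper that is not part of this excerpt. A minimal sketch of what it presumably does (keep only the largest detection, returned as a one-element list), assuming (top, right, bottom, left) boxes:

def getBiggestBoxInList(boxes):
    # boxes are (top, right, bottom, left) tuples, as used by face_recognition;
    # keep only the largest face so at most one person is encoded per frame
    if not boxes:
        return []
    return [max(boxes, key=lambda b: (b[2] - b[0]) * (b[1] - b[3]))]
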
def search_image():
    message = "Welcome to P.R.SYSTEM"
    # Check if a valid image file was uploaded
    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)

        file = request.files['file']

        if file.filename == '':
            return redirect(request.url)

        if file and allowed_file(file.filename):
            # The image file seems valid! Detect faces and return the result.
            img = face_recognition.load_image_file(file)
            # Get face encodings for any faces in the uploaded image
            # proceed only if at least one face was located
            if len(face_recognition.face_locations(img)) > 0:
                face_encoding = face_recognition.face_encodings(img)[0]
                name = ""
                id = ""
                father_name = ""
                address = ""
                guardian_cell = ""
                blood_group = ""
                recent_diagonised_disease = ""
                face_found_in_image = False
                face_found_in_database = False
                if len(face_encoding) > 0:
                    face_found_in_image = True
                    mycursor.execute("select ID, FACE_ENCODING from persons_data_table")
                    result = mycursor.fetchall()
                    message = "Face not found in database"
                    for row in result:
                        tempid = row[0]
                        coding = pickle.loads(row[1])
                        results = face_recognition.compare_faces(coding, face_encoding)
                        if results[0]:
                            mycursor.execute(
                                "select IDENTITY, NAME,FATHER_NAME, ADDRESS,GUARDIAN_CELL, BLOOD_GROUP,RDD from persons_data_table where ID = %s",
                                (tempid,))
                            sresults = mycursor.fetchall()
                            face_found_in_database = True
                            for row in sresults:
                                message = True
                                id = row[0]
                                name = row[1]
                                father_name = row[2]
                                address = row[3]
                                guardian_cell = row[4]
                                blood_group = row[5]
                                recent_diagonised_disease = row[6]
                                break
                if message is not True:
                    return '''
                        <!doctype html>
                        <title>Is this picture identified?</title>
                        <h1>Upload a picture and see if it's a picture of a known person or not!</h1>
                        <p>Face not found in the database. Please enter its details <a href="./entry">here</a>.</p>
                        <form method="POST" enctype="multipart/form-data">
                          <input type="file" name="file" accept="image/*" required="required">
                          <input type="submit" value="Upload">
                        </form>
                       '''
                else:
                    return '''
                        <!doctype html>
                        <title>Face Found</title>
                        <h1>{}.</h1>
                        <img src="http://192.168.43.113:5555/pictures/old/{}.jpg" width="200px" height="270px"><br>
                        Father Name: {}<br>
                        Address: {}<br>
                        Guardian Cell: {}<br>
                        Blood Group: {}<br>
                        Recently Diagnosed Disease: {}<br>
                        </html>
                        '''.format(name, id, father_name, address, guardian_cell, blood_group, recent_diagonised_disease)
            else:
                return '''
                    <!doctype html>
                    <title>Is this picture identified?</title>
                    <h1>Upload a picture and see if it's a picture of a known person or not!</h1>
                    <p>Please upload an image containing a face.</p>
                    <form method="POST" enctype="multipart/form-data">
                      <input type="file" name="file" accept="image/*" required="required">
                      <input type="submit" value="Upload">
                    </form>
                    '''

    return '''
                        <!doctype html>
                        <title>Is this picture identified?</title>
                        <h1>Upload a picture and see if it's a picture of a known person or not!</h1>
                        <p>{}</p>
                        <form method="POST" enctype="multipart/form-data">
                          <input type="file" name="file" accept="image/*" required="required">
                          <input type="submit" value="Upload">
                        </form>
                        '''.format(message)
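The route above also depends on an allowed_file() helper that is not shown. A typical Flask-style sketch, assuming a fixed extension whitelist:

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}  # assumed whitelist

def allowed_file(filename):
    # accept only filenames that end in a whitelisted extension
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS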
Example #36
0
import face_recognition

picture_of_me = face_recognition.load_image_file(
    "img/face_recognition_known/obama.jpg")
my_face_encoding = face_recognition.face_encodings(picture_of_me)[0]

# my_face_encoding now contains a universal 'encoding' of my facial features that can be compared to any other picture of a face!

unknown_picture = face_recognition.load_image_file(
    "img/face_recognition_unknown/unknown_hillary.jpg")
unknown_face_encoding = face_recognition.face_encodings(unknown_picture)[0]

# Now we can check with `compare_faces` whether the two encodings belong to the same person!

results = face_recognition.compare_faces([my_face_encoding],
                                         unknown_face_encoding)

if results[0]:
    print("It's a picture of obama!")
else:
    print("It's not a picture of obama!")
Example #37
0
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
#video_capture = cv2.VideoCapture(0)

camera = picamera.PiCamera()
#camera.resolution = (320, 240)
#frame = np.empty((240, 320, 3), dtype=np.uint8)

camera.resolution = (1024, 768)
frame = np.empty((768, 1024, 3), dtype=np.uint8)

# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

dennis_image = face_recognition.load_image_file("dennis-sivia.jpg")
dennis_face_encoding = face_recognition.face_encodings(dennis_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding, biden_face_encoding, dennis_face_encoding
]
known_face_names = ["Barack Obama", "Joe Biden", "Dennis Sivia"]

print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []

# Loop over images
for (i, imagePath) in enumerate(imagePaths):

    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]

    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])

    encodings = face_recognition.face_encodings(rgb, boxes)

    for encoding in encodings:
        knownEncodings.append(encoding)
        knownNames.append(name)

# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(args["encodings"], "wb")
f.write(pickle.dumps(data))
f.close()
Example #39
0
def reconocimiento(id, image):
    encodings_name = "encodings/{}.pickle".format(id)
    image_location = image
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-e",
                    "--encodings",
                    required=True,
                    help="path to serialized db of facial encodings")
    ap.add_argument("-i", "--image", required=True, help="path to input image")
    ap.add_argument("-d",
                    "--detection-method",
                    type=str,
                    default="cnn",
                    help="face detection model to use: either `hog` or `cnn`")
    args = vars(
        ap.parse_args([
            "--encodings", encodings_name, "--image", image_location,
            "--detection-method", "hog"
        ]))
    # load the known faces and embeddings
    #print("[INFO] loading encodings...")
    data = pickle.loads(open(args["encodings"], "rb").read())

    # load the input image and convert it from BGR to RGB
    image = cv2.imread(args["image"])
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # detect the (x, y)-coordinates of the bounding boxes corresponding
    # to each face in the input image, then compute the facial embeddings
    # for each face
    #print("[INFO] recognizing faces...")
    boxes = face_recognition.face_locations(rgb,
                                            model=args["detection_method"])
    encodings = face_recognition.face_encodings(rgb, boxes)

    # initialize the list of names for each face detected
    names = []

    # loop over the facial embeddings
    for encoding in encodings:
        # attempt to match each face in the input image to our known
        # encodings
        matches = face_recognition.compare_faces(data["encodings"], encoding)
        name = "Unknown"

        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}

            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1

            # determine the recognized face with the largest number of
            # votes (note: in the event of an unlikely tie Python will
            # select first entry in the dictionary)
            name = max(counts, key=counts.get)

        # update the list of names
        names.append(name)
    # note: this checks the last face detected in the image
    if names and names[-1] == id:
        print(1)
    else:
        print(0)
Example #40
0
def main():
    print('starting main function')
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t',
        '--train',
        help='Run in training mode; the name of the person to train must be given')
    args = parser.parse_args()

    if args.train:
        save_image(args.train)
        create_model(train_dir, model_path)

    if args.train is None:
        try:
            while True:
                print "Loop running"
                person = {}
                video_capture = cv2.VideoCapture(cam_link)
                distance_threshold = 0.3
                ret, frame = video_capture.read()
                rgb_frame = frame[:, :, ::-1]
                face_locations = face_recognition.face_locations(rgb_frame)
                if len(face_locations) == 0:
                    continue
                faces_encodings = face_recognition.face_encodings(
                    rgb_frame, face_locations)
                with open(model_path, 'rb') as f:
                    knn_clf = pickle.load(f)
                closest_distances = knn_clf.kneighbors(faces_encodings,
                                                       n_neighbors=1)
                are_matches = [
                    closest_distances[0][i][0] <= distance_threshold
                    for i in range(len(face_locations))
                ]
                for pred, loc, rec in zip(knn_clf.predict(faces_encodings),
                                          face_locations, are_matches):
                    if rec:
                        name = str(pred)
                        print(name, loc)
                        top, right, bottom, left = loc
                        face = frame[top:bottom, left:right]
                        update_image(name, face)
                        check = 0
                        for p in vistor:
                            if p['name'] == name:
                                check = 1
                                now = current_milli_time()
                                if now - p['record_time'] >= detection_delay_ms:
                                    display_name = p['name']
                                    p['record_time'] = current_milli_time()
                                    client.connect(broker, port)
                                    client.publish("test/detection",
                                                   display_name)
                        if check == 0:
                            person['name'] = name
                            person['record_time'] = current_milli_time()
                            vistor.append(person)
                            display_name = person['name']
                            client.connect(broker, port)
                            client.publish("test/detection", display_name)
                    else:
                        print "unkown", loc
        except KeyboardInterrupt:
            print('Stopped face detection application')
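The loop above uses a current_milli_time() helper that is not defined in this excerpt; the usual one-liner it most likely refers to:

import time

def current_milli_time():
    # wall-clock time in milliseconds, used for the detection-delay bookkeeping
    return int(round(time.time() * 1000))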
Example #41
0
print("loading known faces")

known_faces = []
known_names = []

for name in os.listdir(KNOWN_FACES_DIR):
    if name.endswith(".DS_Store"):
        continue
    for filename in os.listdir(f"{KNOWN_FACES_DIR}/{name}"):
        if filename.endswith(".DS_Store"):
            continue
        image = face_recognition.load_image_file(
            f"{KNOWN_FACES_DIR}/{name}/{filename}")
        encoding = face_recognition.face_encodings(image)[0]
        known_faces.append(encoding)
        known_names.append(name)

print("processing unknown faces")

for filename in os.listdir(UNKNOWN_FACES_DIR):
    print(filename)
    if filename.endswith(".DS_Store"):
        continue
    image = face_recognition.load_image_file(f"{UNKNOWN_FACES_DIR}/{filename}")
    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    for face_encoding, face_location in zip(encodings, locations):
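        # The excerpt is cut off here; the usual continuation (an assumed
        # sketch, not the original code) compares each encoding against the
        # known faces and draws a labelled box. TOLERANCE is an assumed
        # constant (0.6 is the library default).
        results = face_recognition.compare_faces(known_faces, face_encoding,
                                                 TOLERANCE)
        if True in results:
            match = known_names[results.index(True)]
            top, right, bottom, left = face_location
            cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
            cv2.putText(image, match, (left + 6, bottom + 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)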
Example #42
0
File: face_train.py Project: orens77/pyzm
    def train(self, size=None):
        t = Timer()
        known_images_path = self.options.get('known_images_path')
        train_model = self.options.get('face_train_model')
        knn_algo = self.options.get('face_recog_knn_algo', 'ball_tree')

        upsample_times = int(self.options.get('face_upsample_times', 1))
        num_jitters = int(self.options.get('face_num_jitters', 0))

        encoding_file_name = known_images_path + '/faces.dat'
        try:
            if (os.path.isfile(known_images_path + '/faces.pickle')):
                # old version, we no longer want it. begone
                self.logger.Debug(
                    2,
                    'removing old faces.pickle, we have moved to clustering')
                os.remove(known_images_path + '/faces.pickle')
        except Exception as e:
            self.logger.Error('Error deleting old pickle file: {}'.format(e))

        directory = known_images_path
        ext = ['.jpg', '.jpeg', '.png', '.gif']
        known_face_encodings = []
        known_face_names = []

        try:
            for entry in os.listdir(directory):
                if os.path.isdir(directory + '/' + entry):
                    # multiple images for this person,
                    # so we need to iterate that subdir
                    self.logger.Debug(
                        1,
                        '{} is a directory. Processing all images inside it'.
                        format(entry))
                    person_dir = os.listdir(directory + '/' + entry)
                    for person in person_dir:
                        if person.endswith(tuple(ext)):
                            self.logger.Debug(
                                1, 'loading face from  {}/{}'.format(
                                    entry, person))

                            # imread seems to do a better job of color space conversion and orientation
                            known_face = cv2.imread('{}/{}/{}'.format(
                                directory, entry, person))
                            if known_face is None or known_face.size == 0:
                                self.logger.Error(
                                    'Error reading file, skipping')
                                continue
                            #known_face = face_recognition.load_image_file('{}/{}/{}'.format(directory,entry, person))
                            if not size:
                                size = int(self.options.get('resize', 800))
                            self.logger.Debug(1, 'resizing to {}'.format(size))
                            known_face = imutils.resize(known_face, width=size)

                            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
                            #self.logger.Debug(1,'Converting from BGR to RGB')
                            known_face = known_face[:, :, ::-1]
                            face_locations = face_recognition.face_locations(
                                known_face,
                                model=train_model,
                                number_of_times_to_upsample=upsample_times)
                            if len(face_locations) != 1:
                                self.logger.Error(
                                    'File {} has {} faces, cannot use for training. We need exactly 1 face. If you think you have only 1 face try using "cnn" for training mode. Ignoring...'
                                    .format(person, len(face_locations)))
                            else:
                                face_encodings = face_recognition.face_encodings(
                                    known_face,
                                    known_face_locations=face_locations,
                                    num_jitters=num_jitters)
                                known_face_encodings.append(face_encodings[0])
                                known_face_names.append(entry)
                                #self.logger.Debug ('Adding image:{} as known person: {}'.format(person, person_dir))

                elif entry.endswith(tuple(ext)):
                    # this was old style. Lets still support it. The image is a single file with no directory
                    self.logger.Debug(1, 'loading face from  {}'.format(entry))
                    #known_face = cv2.imread('{}/{}/{}'.format(directory,entry, person))
                    known_face = cv2.imread('{}/{}'.format(directory, entry))

                    if not size:
                        size = int(self.options.get('resize', 800))
                    self.logger.Debug(1, 'resizing to {}'.format(size))
                    known_face = imutils.resize(known_face, width=size)
                    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
                    known_face = known_face[:, :, ::-1]
                    face_locations = face_recognition.face_locations(
                        known_face,
                        model=train_model,
                        number_of_times_to_upsample=upsample_times)

                    if len(face_locations) != 1:
                        self.logger.Error(
                            'File {} has {} faces, cannot use for training. We need exactly 1 face. If you think you have only 1 face try using "cnn" for training mode. Ignoring...'
                            .format(entry, len(face_locations)))
                    else:
                        face_encodings = face_recognition.face_encodings(
                            known_face,
                            known_face_locations=face_locations,
                            num_jitters=num_jitters)
                        known_face_encodings.append(face_encodings[0])
                        known_face_names.append(os.path.splitext(entry)[0])

        except Exception as e:
            self.logger.Error(
                'Error initializing face recognition: {}'.format(e))
            raise ValueError(
                'Error opening known faces directory. Is the path correct?')

        # Now we've finished iterating all files/dirs,
        # let's create the knn classifier
        if not len(known_face_names):
            self.logger.Error(
                'No known faces found to train, encoding file not created')
        else:
            n_neighbors = int(round(math.sqrt(len(known_face_names))))
            self.logger.Debug(
                2, 'Using algo:{} n_neighbors to be: {}'.format(
                    knn_algo, n_neighbors))
            knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                 algorithm=knn_algo,
                                                 weights='distance')

            self.logger.Debug(1, 'Training model ...')
            knn.fit(known_face_encodings, known_face_names)

            f = open(encoding_file_name, "wb")
            pickle.dump(knn, f)
            f.close()
            self.logger.Debug(
                1, 'wrote encoding file: {}'.format(encoding_file_name))
        diff_time = t.stop_and_get_ms()
        self.logger.Debug(
            1, 'perf: Face Recognition training took: {}'.format(diff_time))
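For reference, a minimal sketch of how the faces.dat classifier written above could be loaded and queried later (the file path, test image, and 0.5 distance cutoff are assumptions):

import pickle
import face_recognition

with open('known_faces/faces.dat', 'rb') as f:  # assumed path
    knn = pickle.load(f)

image = face_recognition.load_image_file('test.jpg')  # assumed test image
locations = face_recognition.face_locations(image)
encodings = face_recognition.face_encodings(image, known_face_locations=locations)
if encodings:
    # kneighbors gives the distance to the closest training face,
    # predict gives the name label learned by knn.fit() above
    distances, _ = knn.kneighbors(encodings, n_neighbors=1)
    for name, dist in zip(knn.predict(encodings), distances[:, 0]):
        print(name if dist <= 0.5 else 'Unknown', dist)
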
def func():
    data = request.json
    urls = list(data["urls"])
    unknown_url = data["unknown_url"]

    faces = 0
    label = ''

    resp = urllib.request.urlopen(unknown_url)
    image = np.asarray(bytearray(resp.read()), dtype="uint8")
    frame = cv2.imdecode(image, cv2.IMREAD_COLOR)
    image = frame

    image_bbox, faces = model_test.get_bbox(image)

    if (faces == 1):
        prediction = np.zeros((1, 3))

        for model_name in os.listdir(model_dir):
            h_input, w_input, model_type, scale = parse_model_name(model_name)
            param = {
                "org_img": image,
                "bbox": image_bbox,
                "scale": scale,
                "out_w": w_input,
                "out_h": h_input,
                "crop": True,
            }
            if scale is None:
                param["crop"] = False
            img = image_cropper.crop(**param)
            prediction += model_test.predict(
                img, os.path.join(model_dir, model_name))

        l = np.argmax(prediction)
        value = prediction[0][l] / 2
        if l == 1:
            label = 'true'
        else:
            label = 'fake'

    error = -1
    body = "False"
    message = ""

    if (faces == 0):
        error = 1
        body = "Error"
        message = "No Face Detected"
    elif (faces > 1):
        error = 2
        body = "Error"
        message = "Multiple Faces Detected"
    elif (faces == 1 and label == 'fake'):
        error = 3
        body = "Error"
        message = "Only 1 Face Detected but is fake"

    elif (faces == 1 and label == 'true'):

        boxes = face_recognition.face_locations(image)

        if (len(boxes) == 0):
            message = "No Face Detected"
            error = 1
            body = "Error"

            # return False, error, msg

        else:
            unknown = face_recognition.face_encodings(image, boxes)[0]

            for im_path in urls:
                resp = urllib.request.urlopen(im_path)
                image = np.asarray(bytearray(resp.read()), dtype="uint8")
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)

                boxes = face_recognition.face_locations(image)
                if (len(boxes) == 0):
                    continue

                known = face_recognition.face_encodings(image, boxes)[0]
                matches = face_recognition.compare_faces([unknown], known)
                if matches[0]:
                    error = -1
                    message = "Validated"
                    body = "True"
                    break

            if (body != "True"):
                error = 4
                message = "Face not Validated"
                body = "Error"

    return jsonify(statusCode=200, body=body, error=error, message=message)
Example #44
0
def encode_image(path):
    img = fr.load_image_file(f"images/{sys.argv[1]}/{path}")
    try:
        return fr.face_encodings(img)[0]
    except IndexError:
        # no face was found in the image
        return None

import face_recognition

p1 = face_recognition.load_image_file("p1.jpeg")
p2 = face_recognition.load_image_file("p2.jpeg")
p3 = face_recognition.load_image_file("p3.jpeg")
p4 = face_recognition.load_image_file("p4.jpeg")

# Get the face encodings of each person
p1_face_encoding = face_recognition.face_encodings(p1)[0]
p2_face_encoding = face_recognition.face_encodings(p2)[0]
p3_face_encoding = face_recognition.face_encodings(p3)[0]
p4_face_encoding = face_recognition.face_encodings(p4)[0]

# Create a list of all known face encodings
known_face_encodings = [
    p1_face_encoding, p2_face_encoding, p3_face_encoding, p4_face_encoding
]

# Load the image we want to check
u1 = face_recognition.load_image_file("u1.jpeg")

# Get face encodings for any face in picture
u1_face_encodings = face_recognition.face_encodings(u1)

for u1_face_encoding in u1_face_encodings:

    # Test whether this unknown face encoding matches any of the 4 people

    result = face_recognition.compare_faces(known_face_encodings,
                                            u1_face_encoding)
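    # (assumed continuation) result is a list of four booleans, one per
    # entry in known_face_encodings, in the same p1..p4 order
    if True in result:
        print("Matched known person p{}".format(result.index(True) + 1))
    else:
        print("No match among the four known people")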
Example #46
0
    # If the height is too high
    if max_height < height:
        # Calculate the amount the image has to shrink
        scaling_factor = max_height / float(height)
        # Apply that factor to the frame
        frame = cv2.resize(frame,
                           None,
                           fx=scaling_factor,
                           fy=scaling_factor,
                           interpolation=cv2.INTER_AREA)

    # Save the new size for diagnostics
    scale_height, scale_width = frame.shape[:2]

    # Get all faces from that frame as encodings
    face_encodings = face_recognition.face_encodings(frame)

    # Loop through each face
    for face_encoding in face_encodings:
        # Measure this face's distance to each known encoding
        matches = face_recognition.face_distance(encodings, face_encoding)

        # Check if any match is certain enough to be the user we're looking for
        match_index = 0
        for match in matches:
            match_index += 1

            # Try to find a match that's confident enough
            if match * 10 < float(config.get("video",
                                             "certainty")) and match > 0:
                timings.append(time.time())
Example #47
0
    def get_encoding(self, encoding_path):
        image_path = Path(encoding_path.parent, "image", encoding_path.stem)
        if image_path.exists() and image_path.is_file():
            image = self.load_image(image_path)
            return face_recognition.face_encodings(image)
Example #48
0
import face_recognition
import cv2

video_capture = cv2.VideoCapture(0)


biden_image = face_recognition.load_image_file("priyam.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

known_face_encodings = [
    biden_face_encoding
]
known_face_names = [
    "Priyam Harsh"
]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()

    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    rgb_small_frame = small_frame[:, :, ::-1]

    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
Example #49
0
def exec_face_match1xnVideo(tolerancia, video):
    retorno = api_classes.FacematchRetorno1xn()
    video_capturado = cv2.VideoCapture(video)

    # Definitions of some variables
    diretorio_com_faces = "dev\\fotos"
    localizacoes_faces = []
    codificacao_faces = []
    nomes_faces = []
    processar_este_frame = True
    # frame counter to limit verification
    contagem_frame = 0
    # how many frames to skip between evaluations
    selecionar_frames_do_video = 15
    # list of all known names found
    encontrados = []
    conhecido_codificacao_faces = []
    conhecido_face_nomes = []

    # Load the base of images registered as .npy files
    db_faces = carregar_db_faces()
    db_img_encode = []
    db_img_name = []

    # Load each registered encoding and organize names and encodings into lists.
    for db_item in db_faces:
        nome_pessoa_db = db_item.split('\\')[len(db_item.split('\\')) -
                                             1].replace('.npy', '')
        try:
            conhecido_codificacao_faces.append(np.load(db_item)[0])
            conhecido_face_nomes.append(nome_pessoa_db)
        except:
            pass

    # Read the video; faces are evaluated every N frames, where N is set in "selecionar_frames_do_video".
    while True:
        ret, frame = video_capturado.read()

        try:
            rgb_frame = frame[:, :, ::-1]
        except:
            break

        processar_este_frame = False
        contagem_frame = contagem_frame + 1
        if contagem_frame == selecionar_frames_do_video:
            contagem_frame = 0
            processar_este_frame = True

        if processar_este_frame:
            localizacoes_faces = face_recognition.face_locations(rgb_frame)
            codificacao_faces = face_recognition.face_encodings(
                rgb_frame, localizacoes_faces)

            nomes_faces = []
            # each compared frame is validated against the face bank registered on the server.
            for face_encoding in codificacao_faces:
                matches = face_recognition.compare_faces(
                    conhecido_codificacao_faces, face_encoding, tolerancia)
                nome = "Desconhecido"

                # all names found are placed in a list to be returned at the end of the function.
                if True in matches:
                    first_match_index = matches.index(True)
                    nome = conhecido_face_nomes[first_match_index]
                    encontrados.append(nome)

                nomes_faces.append(nome)
    # before returning the names, remove duplicates so repeated names found in the video appear once.
    retorno.encontrados = remover_duplicados(encontrados)
    # Return the result as a json object.
    return json.dumps(retorno.__dict__)
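This function also leans on two helpers that are not shown, carregar_db_faces and remover_duplicados; plausible minimal sketches (the .npy directory layout is an assumption):

import glob

def carregar_db_faces():
    # list the .npy encoding files registered on the server (assumed layout)
    return glob.glob('dev\\fotos\\*.npy')

def remover_duplicados(nomes):
    # drop repeated names while preserving first-seen order
    return list(dict.fromkeys(nomes))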
Example #50
0
            print("- Face {} found at Left: {} Top: {} Right: {} Bottom: {}".format(len(dets), d.left(), d.top(), d.right(), d.bottom()))
            shape = predictor(frame_resized, d)
            
            shape = shape_to_np(shape)
            print(shape)

            # loop over the (x, y)-coordinates for the facial landmarks
            # and draw them on the image
            r = 0
            #win.set_image(frame)
            for (x, y) in shape:
                cv2.circle(frame, (int(x/ratio), int(y/ratio)), 3, (255, 255, 255), -1)
            # draw the face box once, scaled back to the original frame size
            cv2.rectangle(frame, (int(d.left()/ratio), int(d.top()/ratio)),
                          (int(d.right()/ratio), int(d.bottom()/ratio)), (0, 255, 0), 1)

            
            # known_face_locations expects (top, right, bottom, left) tuples,
            # not the raw landmark array returned by the predictor
            encodings = face_recognition.face_encodings(
                frame_rgb, [(d.top(), d.right(), d.bottom(), d.left())])
            print(encodings)
            alignedFace = face_aligner.align(534, frame_grey, d, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            cv2.imwrite("alignedface{}.jpg".format(r),alignedFace)
            r +=1
            cv2.imshow("image", frame)
            
                        
                
    if cv2.waitKey(1) & 0xFF == ord('q'):
        camera.release()
        cv2.destroyAllWindows()
        break
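shape_to_np is not defined in this excerpt; it matches the well-known imutils-style helper that converts a dlib landmark object into a NumPy array of (x, y) points:

import numpy as np

def shape_to_np(shape, dtype="int"):
    # convert dlib's full_object_detection into a (num_parts, 2) array
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for i in range(shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords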
Example #51
0
import cv2
import face_recognition
import sqlite3


input_video = cv2.VideoCapture(0)
peter_image = face_recognition.load_image_file("Image from iOS.jpg")
peter_face_encoding = face_recognition.face_encodings(peter_image)[0]


bayne_image = face_recognition.load_image_file("IMG_20190807_162611.jpg")
bayne_face_encoding = face_recognition.face_encodings(bayne_image)[0]


faheem_image = face_recognition.load_image_file("faheem.jpg")
faheem_face_encoding = face_recognition.face_encodings(faheem_image)[0]

priya_image = face_recognition.load_image_file("priya.jpg")
priya_face_encoding = face_recognition.face_encodings(priya_image)[0]

rahul_image = face_recognition.load_image_file("rahul.jpg")
rahul_face_encoding = face_recognition.face_encodings(rahul_image)[0]




known_faces = [
    peter_face_encoding,
    bayne_face_encoding,
    faheem_face_encoding,
    priya_face_encoding,
Example #52
0
def identify():
    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    center = None

    # ret, frame = video_capture.read()
    # camera.start_preview()
    # time.sleep(1)

    camera.capture(frame, format="rgb")

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    # rgb_small_frame = small_frame[:, :, ::-1]
    rgb_small_frame = small_frame

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(
            rgb_small_frame, face_locations)
        print("detected face at location: ", face_locations)

        if (len(face_locations) == 1):
            top, right, bottom, left = face_locations[0]
            center = (left * 4 + right * 4) / 2
            print("face center point is at: ", center)
        elif (len(face_locations) > 1):
            print("Multiple faces. Not moving the camera")
        else:
            print("No faces detected")

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings,
                                                     face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
                      cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
                    (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)
    cv2.waitKey(50)  # brief pause so the frame is actually rendered

    # Hit 'q' on the keyboard to quit!
    #while True:
    #    if cv2.waitKey(1) & 0xFF == ord('q'):
    #        break
    # Release handle to the webcam
    #video_capture.release()
    cv2.destroyAllWindows()
    return center
Example #53
0
import face_recognition
import glob, os

try:
    # Load the jpg files into numpy arrays
    chuck_image = face_recognition.load_image_file("face_recognition_source/trump.jpg")
    obama_image = face_recognition.load_image_file("face_recognition_source/obama.jpg")
    unknown_image = face_recognition.load_image_file("face_recognition_source/obama2.jpg")

    list_of_unknown_image = []
    list_of_unknown_face_encoding = []

    chuck_face_encoding = face_recognition.face_encodings(chuck_image)[0]
    obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
    unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]

    known_faces = [
        obama_face_encoding
    ]

    os.chdir("face_recognition_source/president")
    for file in glob.glob("*.jpg"):
        print(file)
        #list_of_unknown_face_encoding.append(face_recognition.face_encodings(face_recognition.load_image_file(file))[0])
        # results is an array of True/False telling if the unknown face matched anyone in the known_faces array
        results = face_recognition.compare_faces(known_faces, face_recognition.face_encodings(face_recognition.load_image_file(file))[0])

        print("Is the unknown face a picture of Obama? {}".format(results[0]))
        #print("Is the unknown face a picture of Obama? {}".format(results[1]))
        print("Is the unknown face a new person that we've never seen before? {}".format(not True in results))
Example #54
0
def detect_pyramid(cv_image, parameters):

    # Do a pyramidal detection on an image using the face_recognition
    # library. I found that my (very old) GPU didn't handle the large images
    # very well, and downscaling tends to miss small faces. So I do
    # a multi-part detection pyramid - detect at the top level, then
    # split into a 3x3 grid and detect at each of those levels. The
    # faces are then fused between detects in another script.

    assert isinstance(cv_image, np.ndarray)
    assert 'upsample' in parameters.keys()
    assert 'height' in parameters.keys()
    assert 'width' in parameters.keys()

    start_time = time.time()
    max_pixels_per_chip = int(parameters['height']) * int(parameters['width'])
    num_upsamples = int(parameters['upsample'])

    # Downsample the image so that it has the number of
    # pixels defined by height and width in the parameters.
    # This is dynamic to preserve aspect ratio. This should
    # make it possible to fit in the GPU and we can get the biggest,
    # hardest-to-miss faces.
    height = cv_image.shape[0]
    width = cv_image.shape[1]

    num_pixels = height * width
    num_chips = float(num_pixels / max_pixels_per_chip)
    num_iters = np.sqrt(num_chips)

    num_faces = 0

    faceList = []

    # A measure of how much we want the sub-images to overlap,
    # where 1 is 100% overlap.
    pct_exp = 0.06

    # Cut the image into a grid using split_range (a full-image pass for
    # cuts=1, then a 2x2 pass). The even cuts are expanded slightly on top
    # of each other to catch faces that sit on the border between tiles.
    for cuts in [1, 2]:
        # Get lists of left/right and top/bottom indices.
        width_parts = split_range(width, cuts)
        height_parts = split_range(height, cuts)

        # Expansion of the borders by a few percent.
        width_x_percent = int(pct_exp * width / cuts)
        height_x_percent = int(pct_exp * height / cuts)

        for leftIdx in range(len(width_parts) - 1):
            for topIdx in range(len(height_parts) - 1):

                # Get the top/bottom, left/right of each
                # grid.
                left_edge_0 = width_parts[leftIdx]
                right_edge_0 = width_parts[leftIdx + 1]
                top_edge_0 = height_parts[topIdx]
                bottom_edge_0 = height_parts[topIdx + 1]

                # Since the faces may be split on an edge,
                # put in a pct_exp% overlap between tiles.
                # Also have logic for only going to the
                # edge of the image.
                left_edge = max(0, left_edge_0 - width_x_percent)
                top_edge = max(0, top_edge_0 - height_x_percent)
                right_edge = min(width, right_edge_0 + width_x_percent)
                bottom_edge = min(height, bottom_edge_0 + height_x_percent)

                assert left_edge < right_edge
                assert top_edge < bottom_edge

                assert (bottom_edge -
                        top_edge) <= int((bottom_edge_0 - top_edge_0) *
                                         (1 + pct_exp * 2) + 1)
                assert (right_edge -
                        left_edge) <= int((right_edge_0 - left_edge_0) *
                                          (1 + pct_exp * 2) + 1)

                # Cut out the chip.
                chip_part = cv_image[top_edge:bottom_edge,
                                     left_edge:right_edge]

                # Then resize it to fit in the GPU memory, based
                # on the parameters passed to the function.
                height_chip = chip_part.shape[0]
                width_chip = chip_part.shape[1]
                pixels_here = height_chip * width_chip
                resize_ratio = np.sqrt(
                    float(pixels_here) / max_pixels_per_chip)

                resized_chip = cv2.resize(chip_part, \
                    ( int( width_chip / resize_ratio ), \
                      int( height_chip / resize_ratio ) ) )

                # Detect the locations of the faces in a given chip
                # using face_recognition's CNN model.
                face_locations = face_recognition.face_locations(resized_chip, \
                    number_of_times_to_upsample=num_upsamples,  model='cnn')

                num_faces += len(face_locations)

                # Iterate over the detected faces
                for index in range(len(face_locations)):
                    # Get the locations of the face from the
                    # small, resized chip. These indices will
                    # need to be scaled back up for proper
                    # identification.
                    top_chip, right_chip, bottom_chip, left_chip = face_locations[
                        index]

                    # While our rectangle class does have a
                    # resize method, it wouldn't appropriately
                    # account for the shift on sub-images.
                    # So we need to build our own. This will
                    # get the locations of the chip in the larger
                    # original image.
                    top_scaled = int(top_chip * resize_ratio + top_edge)
                    bottom_scaled = int(bottom_chip * resize_ratio + top_edge)
                    left_scaled = int(left_chip * resize_ratio + left_edge)
                    right_scaled = int(right_chip * resize_ratio + left_edge)

                    height_face = int(np.abs(bottom_scaled - top_scaled))
                    width_face = int(np.abs(right_scaled - left_scaled))

                    face_loc_rescaled = [(top_scaled, right_scaled,
                                          bottom_scaled, left_scaled)]

                    # Get the encoding on the upscaled image
                    # using the upsampled face bounding boxes
                    ### # encoding = face_recognition.face_encodings(cv_image, known_face_locations=face_loc_rescaled, num_jitters=10)
                    encoding = face_recognition.face_encodings(
                        cv_image,
                        known_face_locations=face_loc_rescaled,
                        num_jitters=400,
                        model='large')

                    assert len(encoding) == 1
                    encoding = encoding[0]

                    # Draw a rectangle on the image if desired.
                    # cv2.rectangle(cv_image, (left_scaled, top_scaled), (right_scaled, bottom_scaled), (0, 255, 0), 5)
                    # pil_image = Image.fromarray(face_image)
                    # pil_image.show()

                    face_img = cv_image[top_scaled:bottom_scaled,
                                        left_scaled:right_scaled]

                    face_loc_rect = Rectangle(height_face,
                                              width_face,
                                              leftEdge=left_scaled,
                                              topEdge=top_scaled)

                    face = FaceRect(rectangle=face_loc_rect,
                                    face_image=face_img,
                                    encoding=encoding,
                                    name=None,
                                    detection_level=cuts)
                    # Append the face to the list. No effort to de-duplicate
                    # has been made yet -- that's in another script.
                    faceList.append(face)

    faceList = list(set(faceList))

    # Get rid of faces with exact rectangle duplicates
    faces_skip = []

    for i, face in enumerate(faceList):
        for i2 in range(i + 1, len(faceList)):
            if faceList[i].rectangle == faceList[i2].rectangle:
                print(i, i2)
                faces_skip.append(i2)

    faces_skip.sort()
    # print(faceList[faces_skip[0]])
    for i in range(len(faces_skip) - 1, -1, -1):
        faceList.pop(faces_skip[i])

    elapsed_time = time.time() - start_time
    print("Elapsed time is : " + str(elapsed_time), len(faceList))

    # Drawing function
    # for eachFace in list(set(faceList)):
    #     r = eachFace.rectangle
    #     left = r.left
    #     right = r.right
    #     top = r.top
    #     bottom = r.bottom
    #     cv2.rectangle(cv_image, (left, top), (right, bottom), (255, 0, 0), 5)

    # Convert to OpenCV colors, get a resized window, and show image.
    #    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
    #    cv2.namedWindow('Resized Window', cv2.WINDOW_NORMAL)
    #    cv2.resizeWindow('Resized Window', 800, 600)
    #    cv2.imshow('Resized Window', cv_image)
    #    cv2.waitKey(0)

    return faceList, elapsed_time
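split_range is used above but not included; a sketch of what it presumably does (evenly spaced integer boundaries, so that consecutive entries delimit the tiles of the grid):

import numpy as np

def split_range(n, cuts):
    # e.g. split_range(100, 2) -> [0, 50, 100]
    return [int(round(x)) for x in np.linspace(0, n, cuts + 1)]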
Example #55
0
def main():
    # GETTING KNOWN ENCODINGS AND NAMES
    home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
    known_encodings_file_path = home + "/data/known_encodings_file.csv"
    people_file_path = home + "/data/people_file.csv"
    # For storing the encoding of a face
    known_encodings_file = Path(known_encodings_file_path)
    if known_encodings_file.is_file():
        known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
    else:
        known_encodings = []

    # For storing the name corresponding to the encoding
    people_file = Path(people_file_path)
    if people_file.is_file():
        people = np.genfromtxt(people_file, dtype='U',delimiter=',')
    else:
        people = []



# MAIN WORK

    #Capture Video indefinitely
    video_capture = cv2.VideoCapture(0)
    # time.delay(2)
    # TODO: GET FROM DATABASE
    # known encodings of persons in database.
    # known_encodings = []
    # people = []

    #Some important variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    #Eat the Meat, Hmm process the image
    while True:

        # 
        #     1.) Capture the frame from the video.
        #     2.) Compress it to its 1/4th size for faster speed.
        #     3.) If this frame has to be processed, find face_location, face_encodings.
        #     4.) Match with the known_encodings and set the name for each face else Unknown
        #     5.) Add a border around face.
        #         if RED: 
        #             unverified or not authenticated
        #         elif GREEN:
        #             everything OK ;)
        #     6.) Show the frame 
        # 
        ret, frame = video_capture.read()

        #smaller frame 1/4th of original size
        small_frame = cv2.resize(frame, (0,0), fx=.25, fy=.25)

        if process_this_frame:
            #Find the face locations
            face_locations = face_recognition.face_locations(small_frame)
            #Find the face encodings 128 Dimensional!!
            face_encodings = face_recognition.face_encodings(small_frame, face_locations)

            face_names=[]
            other = 0 #Count of un-authorised people
            for face_encoding in face_encodings:
                match = face_recognition.compare_faces(known_encodings, face_encoding)
                name = "Unknown"

                #Find if this person is in the present people array
                for i in range(len(match)):
                    if match[i]:
                        name = people[i]
                        break

                if "Unknown" in name:
                    other += 1
                    name += str(other)
                face_names.append(name)
        
        # Send the names of the people to the parent process
        # os.write(3,b'{"dt" : "This is a test"}')
        print(face_names, flush=True)
            
        process_this_frame = not process_this_frame
        

        #Display the border
        for (top, right, bottom, left),name in zip(face_locations, face_names):
            #Scale up the coordinates by 4 to get face
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            #Assuming person in authenticated
            color =  (0,255,0)  #GREEN
            if not authorised(name):
                #Unauthenticated person
                color = (0,0,255) #RED
                #print so that parent process in Node.js can use it
                print(name,flush=True)

            #Display border
            cv2.rectangle(frame, (left,top), (right,bottom), color, 2)

            # Draw a label with name
            cv2.rectangle(frame, (left,bottom-35), (right, bottom), color, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name,(left+6, bottom-6), font, 1.0, (255,255,255), 1)

        # Display the resulting image with borders and names
        cv2.imshow('Video', frame)

        # Hit ESC on the keyboard to quit
        if cv2.waitKey(100) == 27:
            break
            
    #Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
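The authorised() check above is not part of the snippet either; a minimal stand-in, under the assumption that anyone matched to a known encoding counts as authorised:

def authorised(name):
    # recognized people keep their real name; strangers become "Unknown1", ...
    return not name.startswith("Unknown")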
Example #56
0
face_locations = []
face_names = []
face_encodings = []
found_names = []
students = []

for file in files:
    if file.endswith('.jpg'):
        x = file.split('.')
        file_names.append(path + file)
        names.append(x[0])

for file in file_names:
    if file.endswith('.jpg'):
        temp = face_recognition.load_image_file(file)
        if face_recognition.face_encodings(temp):
            encodings.append(face_recognition.face_encodings(temp)[0])
            print(file)

file = open('students.txt', 'r')
for line in file:
    students.append(line.rstrip('\n'))

while True:

    ret, frame = vid.read()

    crop = None

    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
Example #57
0
import face_recognition
import cv2
import numpy as np

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

rtvik_image_1 = face_recognition.load_image_file("known_users/Rtvik.jpg")
rtvik_face_encoding_1 = face_recognition.face_encodings(rtvik_image_1)[0]
abhay_image_1 = face_recognition.load_image_file("known_users/Abhay.jpg")
abhay_face_encoding_1 = face_recognition.face_encodings(abhay_image_1)[0]

known_face_encodings = [
   rtvik_face_encoding_1,
   abhay_face_encoding_1,
]

known_face_names = [
   "Rtvik",
   "Abhay",
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()
Example #58
0
    def recognise(self):
        """Recognizing known face"""
        # load the known faces and embeddings
        print("[INFO] loading encodings...")
        with open(self.encodings, "rb") as f:
            data = pickle.load(f)

        # initialize the video stream and pointer to output video file, then
        # allow the camera sensor to warm up
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        writer = None
        time.sleep(2.0)

        # loop over frames from the video file stream
        while True:
            # grab the frame from the threaded video stream
            frame = vs.read()

            # convert the input frame from BGR to RGB, then resize it to
            # self.resolution px wide (to speed up processing)
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            rgb = imutils.resize(rgb, width=self.resolution)
            r = frame.shape[1] / float(rgb.shape[1])

            # detect the (x, y)-coordinates of the bounding boxes
            # corresponding to each face in the input frame, then compute
            # the facial embeddings for each face
            boxes = face_recognition.face_locations(
                rgb, model=self.detection_method)
            encodings = face_recognition.face_encodings(rgb, boxes)
            names = []

            # loop over the facial embeddings
            for encoding in encodings:
                # attempt to match each face in the input image to our known
                # encodings
                matches = face_recognition.compare_faces(
                    data["encodings"], encoding)
                name = "Unknown"

                # check to see if we have found a match
                if True in matches:
                    # find the indexes of all matched faces then initialize a
                    # dictionary to count the total number of times each face
                    # was matched
                    matched_idxs = [i for (i, b) in enumerate(matches) if b]
                    counts = {}

                    # loop over the matched indexes and maintain a count for
                    # each recognized face
                    for i in matched_idxs:
                        name = data["names"][i]
                        counts[name] = counts.get(name, 0) + 1

                    # determine the recognized face with the largest number
                    # of votes (note: in the event of an unlikely tie Python
                    # will select first entry in the dictionary)
                    name = max(counts, key=counts.get)

                # update the list of names
                names.append(name)

            # if any face was recognized, clean up and return the first name
            for (_, name) in zip(boxes, names):
                cv2.destroyAllWindows()
                vs.stop()
                if writer is not None:
                    writer.release()
                return name

            # if the video writer is None *AND* we are supposed to write
            # the output video to disk initialize the writer
            if writer is None and self.output is not None:
                four_cc = cv2.VideoWriter_fourcc(*"MJPG")
                writer = cv2.VideoWriter(self.output, four_cc, 20,
                                         (frame.shape[1], frame.shape[0]),
                                         True)

            # if the writer is not None, write the frame with recognized
            # faces to disk
            if writer is not None:
                writer.write(frame)

            # check to see if we are supposed to display the output frame to
            # the screen
            if self.display > 0:
                cv2.imshow("Frame", frame)
                key = cv2.waitKey(1) & 0xFF

                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    cv2.destroyAllWindows()
                    vs.stop()
                    if writer is not None:
                        writer.release()
                    return ""
Example #59
0
knownEncodedList = generateEncodings(database_images)
print("Encoding complete")

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
process_this_frame = True
while True:
    success, frame = video_capture.read()

    # for optimisation, each frame is resized to 1/4 of its original size
    resizedFrame = cv2.resize(frame, (0, 0), None, fx=0.25, fy=0.25)

    if process_this_frame:
        currFacesLocations = face_recognition.face_locations(resizedFrame)
        currFacesEncodings = face_recognition.face_encodings(
            resizedFrame, currFacesLocations)

        for encoding, location in zip(currFacesEncodings, currFacesLocations):
            # matches is a list of True/False values, one per known image;
            # a tolerance of 0.5 is used for improved accuracy (subject to change)
            matches = face_recognition.compare_faces(knownEncodedList,
                                                     encoding, 0.5)
            faceDistance = face_recognition.face_distance(
                knownEncodedList, encoding)
            matchIndex = np.argmin(faceDistance)
            name = 'Unknown'
            if matches[matchIndex]:
                name = known_face_names[matchIndex].upper()
                recordAttendance(name)
            # note: face_locations returns (top, right, bottom, left)
            y1, x1, y2, x2 = location
            # scale the coordinates back up by 4 to undo the 0.25 resize
            y1, x1, y2, x2 = y1 * 4, x1 * 4, y2 * 4, x2 * 4
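
recordAttendance() is not shown in the snippet. A minimal sketch of what it might do, assuming attendance is appended to a CSV with a timestamp and each name is logged once per run; the file name and format are assumptions:

import csv
from datetime import datetime

recorded = set()  # hypothetical: names already logged in this run

def recordAttendance(name):
    if name in recorded:
        return
    recorded.add(name)
    with open('attendance.csv', 'a', newline='') as f:
        csv.writer(f).writerow([name, datetime.now().isoformat(timespec='seconds')])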
Example #60
0
import os

import face_recognition
import numpy as np
import pandas as pd

# Note: `destdir` and `files` are assumed to be defined earlier in the
# source project.
predicted_classes = []

#    for file in tests[:33]:
os.chdir(destdir)
k = 0
for file in files:
    k += 1
    print(k)
    known_image = face_recognition.load_image_file(file)
    # Treat the whole image as a single face box and encode it.
    # image.shape is (rows, cols, channels) = (height, width, channels)
    height = known_image.shape[0]
    width = known_image.shape[1]
    face_location = (0, width, height, 0)  # (top, right, bottom, left)
    face_locations = [face_location]
    face_encoding = face_recognition.face_encodings(
        known_image, face_locations)[0]
    if k == 1:
        arrayf = face_encoding
    else:
        arrayf = np.vstack([arrayf, face_encoding])

df = pd.DataFrame(arrayf)
names = pd.DataFrame(files)
named = pd.concat([names, df], axis=1, ignore_index=True)
#named.to_csv('/home/pete/compare/dataframe4.csv', encoding='utf-8', index=False)
#df = pd.read_csv('embed1.csv', header=None)
#
# Enter the name of a file containing a face
j = 0
while True:
    try:
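
The final loop is truncated in the aggregated source. A minimal sketch of the lookup it appears to set up, matching a query image against the rows of `named` (column 0 holds the file name, columns 1 onward the 128-d embedding); the function name and whole-image encoding step are assumptions mirroring the code above:

def closest_match(query_file):
    # Encode the whole query image as a single face box, exactly as above
    image = face_recognition.load_image_file(query_file)
    height, width = image.shape[:2]
    encoding = face_recognition.face_encodings(image, [(0, width, height, 0)])[0]
    # Return the file name of the nearest stored embedding (Euclidean distance)
    stored = named.iloc[:, 1:].to_numpy()
    distances = np.linalg.norm(stored - encoding, axis=1)
    return named.iloc[int(np.argmin(distances)), 0]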