def create_store(fileName, name, fname, age, mob):
    """Encode the face in *fileName* and store it as a pending record in Firebase.

    The record key is built from the personal details with spaces replaced
    by '*' so the key is safe for the database path.

    Returns "YES" on success and "NO" on any failure (no face found, bad
    image, database error).
    """
    try:
        img = face_recognition_api.load_image_file(fileName)
        imgEncoding = face_recognition_api.face_encodings(img)
        if len(imgEncoding) > 1:
            # Warn but continue; the whole encoding list is stored below.
            print('More than one face found in the image')
        if len(imgEncoding) == 0:
            print('No Face found in the Image')
            # Bug fix: the original fell off the end here and returned None,
            # but callers compare against "YES"/"NO" — report failure.
            return "NO"
        print('Encoded successfully.')
        encoded = convert_encoding(imgEncoding)
        # '*' stands in for spaces so the names survive being used in a key.
        name = name.replace(' ', '*')
        fname = fname.replace(' ', '*')
        print(name)
        print(fname)
        uniqueKey = str(name) + '@' + str(age) + '@' + str(fname) + '@' + str(mob)
        print(uniqueKey)
        root = db.reference('stationID')
        # NOTE(review): station id 'ABC123' is hard-coded — confirm intended.
        root.child('ABC123').child('pending').child(uniqueKey).set(
            {'encoded': encoded})
        print('Done')
        return "YES"
    except Exception as e:
        # Narrowed from a bare `except:`; surface the cause instead of
        # silently hiding it, while preserving the "NO" failure contract.
        print(e)
        return "NO"
def scan_known_people(known_people_folder):
    """Collect names and face encodings for every image in *known_people_folder*.

    The person's name is the image file name with its extension stripped;
    only the first face found in each image is kept, and images without a
    detectable face are reported and skipped.

    Returns a pair of parallel lists: (names, encodings).
    """
    names = []
    encodings_out = []
    for image_path in image_files_in_folder(known_people_folder):
        person, _ = os.path.splitext(os.path.basename(image_path))
        loaded = face_recognition_api.load_image_file(image_path)
        found = face_recognition_api.face_encodings(loaded)
        if not found:
            print("No faces")
            continue
        print("Found")
        names.append(person)
        encodings_out.append(found[0])
    return names, encodings_out
def create_dataset(training_dir_path, labels):
    """Build training rows from a labelled folder tree of face images.

    Each usable image contributes one row: its first face encoding with the
    numeric label appended as the final element.  Images with no face are
    skipped (red warning); extra faces beyond the first are ignored
    (yellow warning).
    """
    dataset = []
    for entry in _zipped_folders_labels_images(training_dir_path, labels):
        folder, label, file_names = entry[0], entry[1], entry[2]
        for file_name in file_names:
            file_path = os.path.join(folder, file_name)
            loaded = face_recognition_api.load_image_file(file_path)
            encodings = face_recognition_api.face_encodings(loaded)
            if len(encodings) > 1:
                print('\x1b[0;37;43m' +
                      'More than one face found in {}. Only considering the first face.'.format(file_path) +
                      '\x1b[0m')
            if not encodings:
                print('\x1b[0;37;41m' +
                      'No face found in {}. Ignoring file.'.format(file_path) +
                      '\x1b[0m')
            else:
                print('Encoded {} successfully.'.format(file_path))
                # Row layout: 128-d encoding followed by the label.
                dataset.append(np.append(encodings[0], label))
    return dataset
def find_key_pts():
    """Read 'images/locations.txt' and return [[encodings, image, location], ...].

    Each line of the file is expected to name an image and a location,
    separated by a space or comma.  Images in which no face is detected
    are skipped.

    NOTE(review): this chdir's to the parent directory as a side effect,
    which changes the CWD for the whole process — confirm intended.
    """
    key_pts = []
    os.chdir('../')
    with open('images/locations.txt', 'r') as f:
        locations = f.read()
    locations = locations.split('\n')
    for l in locations:
        l = l.replace(' ', ',')
        l = l.split(',')
        # Bug fix: a blank or malformed line (e.g. the empty string produced
        # by a trailing newline at EOF) used to raise IndexError on l[1].
        if len(l) < 2 or not l[0]:
            continue
        image = l[0]
        loc = l[1]
        path = os.path.join('images', image)
        img = face_recognition_api.load_image_file(path)
        faces_encodings = face_recognition_api.face_encodings(img)
        if faces_encodings:
            key_pts.append([faces_encodings, image, loc])
    return key_pts
def load_all(self):
    """Load every stored face row from the DB, encode its training image,
    and build + persist an Annoy nearest-neighbour index ('index.ann').

    Side effects: populates self.faces, self.known_encoding_faces,
    self.face_user_keys and self.layer_size.
    """
    results = self.db.select(
        'SELECT faces.id, faces.user_id, faces.filename, faces.created FROM faces'
    )
    self.layer_size = 0
    count = 0
    for row in results:
        user_id = row[1]
        filename = row[2]
        face = {
            "id": row[0],
            "user_id": user_id,
            "filename": filename,
            "created": row[3]
        }
        self.faces.append(face)
        face_image = face_recognition_api.load_image_file(
            self.load_train_file_by_name(filename))
        # NOTE(review): [0] assumes every training image contains at least
        # one detectable face; an image without one raises IndexError here.
        face_image_encoding = face_recognition_api.face_encodings(
            face_image)[0]
        index_key = len(self.known_encoding_faces)
        self.known_encoding_faces.append(face_image_encoding)
        index_key_string = str(index_key)
        self.face_user_keys['{0}'.format(index_key_string)] = user_id
        print('user_id', user_id)
        if count == 0:
            # The first row fixes the encoding dimensionality for the index.
            self.layer_size = len(face_image_encoding)
            self.tree = AnnoyIndex(self.layer_size, metric)  # prepare index
        self.tree.add_item(user_id, face_image_encoding)
        count += 1
    # Bug fix: these two prints used Python 2 statement syntax
    # (`print '...'`), which is a SyntaxError under Python 3 — the version
    # the rest of this file's print() calls target.
    print('building index...\n')
    if self.layer_size > 0:
        print('layer_size=', self.layer_size)
        self.tree.build(ntrees)
        self.tree.save('index.ann')
def recognize(self, unknown_filename):
    """Identify the person in *unknown_filename* via the Annoy index and
    predict their age and gender.

    Returns a (user_id, guess_age, guess_gender) tuple; user_id is the
    string 'unknown' when the index yields no match.
    """
    tree = loadannoy()
    (unfile, unfile_face) = self.load_unknown_file_by_name(unknown_filename)
    unknown_image = face_recognition_api.load_image_file(unfile)
    # NOTE(review): [0] assumes at least one face is present in the image;
    # an image with no detectable face raises IndexError here.
    unknown_encoding_image = face_recognition_api.face_encodings(
        unknown_image)[0]
    #results = face_recognition.compare_faces(self.known_encoding_faces, unknown_encoding_image);
    results2 = find_matching_id(unknown_encoding_image, tree)
    guess_age = age_predict.predict([unfile_face])
    guess_gender = gender_predict.predict([unfile_face])
    #print("results", results)
    print("results2", results2)
    if results2:
        matching_id, min_dist = results2
        user_id = matching_id
        #self.load_user_by_index_key(matching_id)
        return (user_id, guess_age, guess_gender)
    return ('unknown', guess_age, guess_gender)


# NOTE(review): the triple-quote below opens an unterminated string that
# comments out the dead webcam-loop code following it in the original file.
'''
process_this_frame = True with warnings.catch_warnings(): warnings.simplefilter("ignore") while True: # Grab a single frame of video ret, frame = video_capture.read() # Resize frame of video to 1/4 size for faster face recognition processing small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25) # Only process every other frame of video to save time if process_this_frame: # Find all the faces and face encodings in the current frame of video face_locations = face_recognition_api.face_locations(small_frame) face_encodings = face_recognition_api.face_encodings( small_frame, face_locations) face_names = [] predictions = [] if len(face_encodings) > 0: closest_distances = clf.kneighbors(face_encodings, n_neighbors=1) is_recognized = [ closest_distances[0][i][0] <= 0.5 for i in range(len(face_locations)) ] # predict classes and cull classifications that are not with high confidence predictions = [ (le.inverse_transform(int(pred)).title(), loc) if rec else
def Fillattendances():
    """Run webcam face recognition and record attendance for the subject
    typed into the `tx` entry widget.

    Side effects: writes an Attendance/<...>.csv file, talks to two MySQL
    databases ('attendance' for the student roster, 'automatic' for the
    per-session table), updates the `Notifica` label, and pops up a
    tkinter window displaying the resulting sheet.

    NOTE(review): reconstructed from whitespace-mangled source — the exact
    nesting of a few statements is inferred and should be confirmed.
    """
    sub = tx.get()
    if sub == '':
        # No subject entered: show the error popup and bail out.
        err_screen1()
    else:
        df = pd.read_csv("StudentDetails\StudentDetails.csv")
        video_capture = cv2.VideoCapture(0)
        fname = 'classifier.pkl'
        if os.path.isfile(fname):
            with open(fname, 'rb') as f:
                # Unpickle the trained (LabelEncoder, classifier) pair.
                (le, clf) = pickle.load(f)
        else:
            print('\x1b[0;37;43m' +
                  "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
            quit()
        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        col_names = ['Enrollment', 'Name', 'Date', 'Time', 'status']
        attendance = pd.DataFrame(columns=col_names)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            while True:
                # Grab a single frame of video
                ret, frame = video_capture.read()
                # Resize frame of video to 1/4 size for faster face recognition processing
                small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
                # Only process every other frame of video to save time
                if process_this_frame:
                    # Find all the faces and face encodings in the current frame of video
                    face_locations = face_recognition_api.face_locations(
                        small_frame)
                    face_encodings = face_recognition_api.face_encodings(
                        small_frame, face_locations)
                    face_names = []
                    predictions = []
                    global Id
                    if len(face_encodings) > 0:
                        closest_distances = clf.kneighbors(face_encodings,
                                                           n_neighbors=1)
                        # Recognized only when the nearest neighbour is within 0.5.
                        is_recognized = [
                            closest_distances[0][i][0] <= 0.5
                            for i in range(len(face_locations))
                        ]
                        global Subject
                        global aa
                        global date
                        global timeStamp
                        Subject = tx.get()
                        ts = time.time()
                        date = datetime.datetime.fromtimestamp(
                            ts).strftime('%Y-%m-%d')
                        timeStamp = datetime.datetime.fromtimestamp(
                            ts).strftime('%H:%M:%S')
                        status = "P"  # present
                        # predict classes and cull classifications that are not with high confidence
                        predictions = [
                            (le.inverse_transform(int(pred)).title(), loc)
                            if rec else ("Unknown.person", loc)
                            for pred, loc, rec in zip(
                                clf.predict(face_encodings), face_locations,
                                is_recognized)
                        ]
                    # # Predict the unknown faces in the video frame
                    # for face_encoding in face_encodings:
                    #     face_encoding = face_encoding.reshape(1, -1)
                    #
                    #     # predictions = clf.predict(face_encoding).ravel()
                    #     # person = le.inverse_transform(int(predictions[0]))
                    #
                    #     predictions = clf.predict_proba(face_encoding).ravel()
                    #     maxI = np.argmax(predictions)
                    #     person = le.inverse_transform(maxI)
                    #     confidence = predictions[maxI]
                    #     print(person, confidence)
                    #     if confidence < 0.7:
                    #         person = 'Unknown'
                    #
                    #     face_names.append(person.title())
                process_this_frame = not process_this_frame
                reg = 0
                Name = 0
                # Display the results
                # NOTE(review): `date`/`timeStamp`/`status` are set only after a
                # frame with faces has been processed; a hit here before that
                # would raise NameError — confirm against the original layout.
                for name, (top, right, bottom, left) in predictions:
                    # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                    top *= 4
                    right *= 4
                    bottom *= 4
                    left *= 4
                    # Draw a box around the face
                    cv2.rectangle(frame, (left, top), (right, bottom),
                                  (0, 0, 255), 2)
                    #while name!='Unknown':
                    # Draw a label with a name below the face
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                                  (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, bottom - 6), font,
                                1.0, (255, 255, 255), 1)
                    # Names are encoded "<Name>.<Enrollment>..."; split them out.
                    reg = os.path.split(name)[-1].split(".")[1]
                    Name = os.path.split(name)[-1].split(".")[0]
                    attendance.loc[len(attendance)] = [
                        reg, Name, date, timeStamp, status
                    ]
                # Display the resulting image
                cv2.imshow('Video', frame)
                attendance = attendance.drop_duplicates(['Enrollment'],
                                                        keep='first')
                #attendance = attendance[attendance.Enrollment == 'person']
                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        video_capture.release()  # Release handle to the webcam
        Batch = ty.get()
        Subject = tx.get()
        ts = time.time()
        date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
        timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
        Hour, Minute, Second = timeStamp.split(":")
        fileName = "Attendance/" + Batch + "_" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"
        import pymysql.connections
        ###Connect to the database
        try:
            global cursor
            connection = pymysql.connect(host='localhost',
                                         user='******',
                                         password='',
                                         db='attendance')
            cursor = connection.cursor()
        except Exception as e:
            print(e)
        # NOTE(review): SQL built by string concatenation — injection risk if
        # Batch is user-controlled; parameterize when touching this code.
        sql = "SELECT * FROM students WHERE BRANCH='" + Batch + "'"
        # Execute the SQL command
        cursor.execute(sql)
        # Fetch all the rows in a list of lists.
        results = cursor.fetchall()
        count = 0
        # Every enrolled student starts as absent ("A"); students recognized
        # above were appended with "P" first, and the later drop_duplicates
        # keeps the first (present) row per student.
        for row in results:
            no = row[1]
            namee = row[2]
            #Attendance[status] = ["P" if Enrollment in reg else "A"]
            status = "A"
            attendance.loc[len(attendance)] = [no, namee, '', '', status]
        attendance = attendance.drop_duplicates(subset=["Name"], keep='first')
        #DataFrame.drop(labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise')
        #attendance = attendance[attendance.Enrollment == 'person']
        indexNames = attendance[attendance['Name'] == 'Unknown'].index
        # Delete these row indexes from dataFrame
        attendance.drop(indexNames, inplace=True)
        print(attendance)
        attendance = attendance.drop_duplicates(subset=["Enrollment"],
                                                keep='first')
        attendance.to_csv(fileName, index=True)
        #file_df=pd.read_excel(fileName)
        #attendance=file_df.drop_duplicates(subset=["Enrollment"], keep='first')
        #attendance.to_csv(fileName, index=True)
        ##Create table for Attendance
        date_for_DB = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d')
        DB_Table_name = str(Batch + "_" + Subject + "_" + date_for_DB +
                            "_Time_" + Hour + "_" + Minute + "_" + Second)
        import pymysql.connections
        ###Connect to the database
        try:
            connection = pymysql.connect(host='localhost',
                                         user='******',
                                         password='',
                                         db='automatic')
            cursor = connection.cursor()
        except Exception as e:
            print(e)
        sql = "CREATE TABLE " + DB_Table_name + """ (ID INT NOT NULL AUTO_INCREMENT, ENROLLMENT varchar(100) NOT NULL, NAME VARCHAR(50) NOT NULL, DATE VARCHAR(20) NOT NULL, TIME VARCHAR(20) NOT NULL, STATUS VARCHAR(20) NOT NULL, PRIMARY KEY (ID) ); """
        ####Now enter attendance in Database
        # NOTE(review): only the LAST recognized face (reg/Name left over from
        # the final display-loop iteration) is inserted here, not the sheet.
        insert_data = "INSERT INTO " + DB_Table_name + " (ID,ENROLLMENT,NAME,DATE,TIME,STATUS) VALUES (0, %s, %s, %s,%s,%s)"
        VALUES = (str(reg), str(Name), str(date), str(timeStamp), str(status))
        try:
            cursor.execute(sql)  ##for create a table
            cursor.execute(insert_data, VALUES)  ##For insert data into table
        except Exception as ex:
            print(ex)  #
        M = 'Attendance filled Successfully'
        Notifica.configure(text=M,
                           bg="Green",
                           fg="white",
                           width=33,
                           font=('times', 15, 'bold'))
        Notifica.place(x=20, y=250)
        # NOTE(review): `VideoCapture` is undefined here — the handle is
        # `video_capture`, already released above; this line raises NameError.
        VideoCapture.release()
        cv2.destroyAllWindows()
        import csv
        import tkinter
        root = tkinter.Tk()
        root.title("Attendance of " + Subject)
        root.configure(background='snow')
        # NOTE(review): absolute Windows path hard-coded — breaks elsewhere.
        cs = 'D:/project/dev/Attendace managemnt system/' + fileName
        with open(cs, newline="") as file:
            reader = csv.reader(file)
            r = 0
            for col in reader:
                c = 0
                for row in col:
                    # i've added some styling
                    label = tkinter.Label(root,
                                          width=8,
                                          height=1,
                                          fg="black",
                                          font=('times', 15, ' bold '),
                                          bg="lawn green",
                                          text=row,
                                          relief=tkinter.RIDGE)
                    label.grid(row=r, column=c)
                    c += 1
                r += 1
        root.mainloop()
        print(attendance)
# NOTE(review): fragment — the enclosing function header and the
# `if os.path.isfile(fname):` matching the `else:` below lie outside this view.
with open(fname, 'rb') as f:
    # Unpickle the trained (LabelEncoder, classifier) pair.
    (le, clf) = pickle.load(f)
else:
    print('\x1b[0;37;43m' +
          "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
    quit()
for image_path in get_prediction_images(prediction_dir):
    # print colorful text with image name
    print('\x1b[6;30;42m' +
          "=====Predicting faces in '{}'=====".format(image_path) + '\x1b[0m')
    img = face_recognition_api.load_image_file(image_path)
    X_faces_loc = face_recognition_api.face_locations(img)
    faces_encodings = face_recognition_api.face_encodings(
        img, known_face_locations=X_faces_loc)
    print("Found {} faces in the image".format(len(faces_encodings)))
    closest_distances = clf.kneighbors(faces_encodings, n_neighbors=1)
    # A face is recognized only when its nearest neighbour is within 0.5.
    is_recognized = [
        closest_distances[0][i][0] <= 0.5 for i in range(len(X_faces_loc))
    ]
    # predict classes and cull classifications that are not with high confidence
    predictions = [(le.inverse_transform(int(pred)).title(), loc)
                   if rec else ("Unknown", loc)
                   for pred, loc, rec in zip(clf.predict(faces_encodings),
                                             X_faces_loc, is_recognized)]
    print(predictions)
def get_frame(self):
    """Read one webcam frame, draw face-recognition results on it, and
    return the frame as JPEG bytes.

    Once faces have been drawn more than 10 times, self.isFace flips to
    True and subsequent calls delegate to the gesture pipeline instead.
    """
    if self.isFace is False:
        # Load Face Recogniser classifier
        fname = 'classifier.pkl'
        if os.path.isfile(fname):
            with open(fname, 'rb') as f:
                # Unpickle the trained (LabelEncoder, classifier) pair.
                (le, clf) = pickle.load(f)
        else:
            print('\x1b[0;37;43m' +
                  "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
            quit()
        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        ret, frame = self.video.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition_api.face_locations(
                small_frame)
            face_encodings = face_recognition_api.face_encodings(
                small_frame, face_locations)
            face_names = []
            predictions = []
            if len(face_encodings) > 0:
                closest_distances = clf.kneighbors(face_encodings,
                                                   n_neighbors=1)
                # Recognized only when the nearest neighbour is within 0.5.
                is_recognized = [
                    closest_distances[0][i][0] <= 0.5
                    for i in range(len(face_locations))
                ]
                # predict classes and cull classifications that are not with high confidence
                predictions = [
                    (le.inverse_transform(int(pred)).title(), loc)
                    if rec else ("Unknown", loc)
                    for pred, loc, rec in zip(clf.predict(
                        face_encodings), face_locations, is_recognized)
                ]
            # # Predict the unknown faces in the video frame
            # for face_encoding in face_encodings:
            #     face_encoding = face_encoding.reshape(1, -1)
            #
            #     # predictions = clf.predict(face_encoding).ravel()
            #     # person = le.inverse_transform(int(predictions[0]))
            #
            #     predictions = clf.predict_proba(face_encoding).ravel()
            #     maxI = np.argmax(predictions)
            #     person = le.inverse_transform(maxI)
            #     confidence = predictions[maxI]
            #     print(person, confidence)
            #     if confidence < 0.7:
            #         person = 'Unknown'
            #
            #     face_names.append(person.title())
        process_this_frame = not process_this_frame
        # Display the results
        for name, (top, right, bottom, left) in predictions:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # id = getFromDB(name)
            # cust_name = id[1] if id[1] is not None else name
            # cust_id = str(id[0]) if id[0] is not None else '1'
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_COMPLEX
            cv2.putText(frame, 'ID: ' + getFilePositionName(name),
                        (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            cv2.putText(frame, 'Name: ' + name, (left + 6, bottom + 19), font,
                        1.0, (255, 255, 255), 1)
            face_name = name
            # NOTE(review): nesting reconstructed from mangled source — the
            # counter may have been per-frame rather than per-face; confirm.
            self.count += 1
            if self.count > 10:
                self.isFace = True
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
    else:
        return self.gesture.get_frame()
def get_facial_points(img):
    """Return the face encodings detected in *img*, one vector per face."""
    encodings = face_recognition_api.face_encodings(img)
    return encodings
# NOTE(review): fragment — the enclosing function header and the
# `if os.path.isfile(fname):` matching the `else:` below lie outside this view.
with open(fname, 'rb') as f:
    # Unpickle the trained (LabelEncoder, classifier) pair.
    (le, clf) = pickle.load(f)
else:
    print('\x1b[0;37;43m' +
          "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
    quit()
curr = time.time()
for image_path in get_prediction_images(prediction_dir):
    # print colorful text with image name
    print('\x1b[6;30;42m' +
          "=====Predicting faces in '{}'=====".format(image_path) + '\x1b[0m')
    img = face_recognition_api.load_image_file(image_path)
    faces_encodings = face_recognition_api.face_encodings(img)
    print("Found {} faces in the image".format(len(faces_encodings)))
    for face_encoding in faces_encodings:
        # Classifier expects a 2-D (1, n_features) sample.
        face_encoding = face_encoding.reshape(1, -1)
        predictions = clf.predict_proba(face_encoding).ravel()
        maxI = np.argmax(predictions)
        person = le.inverse_transform(maxI)
        confidence = predictions[maxI]
        print("Predict {} with {:.2f} confidence.".format(person,
                                                          confidence))
    # print(face_recognition_api.compare_faces(X, face_encoding))
    #
    # predictions = clf.predict(face_encoding).ravel()
    # person = le.inverse_transform(int(predictions[0]))
def main():
    """Predict faces in every image under ./test-images with the KNN
    classifier pickled in 'classifier.pkl'.

    Prints, for each image, a list of (name, face_location) predictions;
    faces whose nearest training neighbour is farther than 0.5 in
    encoding space are labelled "Unknown".
    """
    fname = 'classifier.pkl'
    prediction_dir = './test-images'
    encoding_file_path = './encoded-images-data.csv'

    df = pd.read_csv(encoding_file_path)
    full_data = np.array(df.astype(float).values.tolist())
    # Extract features and labels; column 0 is an id column.
    # NOTE(review): X and y are built but never used below — kept so the
    # encodings CSV is still read/validated as before; confirm removable.
    X = np.array(full_data[:, 1:-1])
    y = np.array(full_data[:, -1:])

    if os.path.isfile(fname):
        with open(fname, 'rb') as f:
            # Unpickle the trained (LabelEncoder, classifier) pair.
            (le, clf) = pickle.load(f)
    else:
        print('\x1b[0;37;43m' +
              "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
        quit()

    for image_path in get_prediction_images(prediction_dir):
        # print colorful text with image name
        print('\x1b[6;30;42m' +
              "=====Predicting faces in '{}'=====".format(image_path) +
              '\x1b[0m')
        img = face_recognition_api.load_image_file(image_path)
        X_faces_loc = face_recognition_api.face_locations(img)
        faces_encodings = face_recognition_api.face_encodings(
            img, known_face_locations=X_faces_loc)
        print("Found {} faces in the image".format(len(faces_encodings)))

        # Bug fix: kneighbors()/predict() raise on an empty sample list, so
        # an image with no detectable face used to crash the whole run.
        if not faces_encodings:
            continue

        closest_distances = clf.kneighbors(faces_encodings, n_neighbors=1)
        # A match counts as recognized only when its nearest neighbour is
        # within 0.5 in encoding space.
        is_recognized = [
            closest_distances[0][i][0] <= 0.5
            for i in range(len(X_faces_loc))
        ]
        # predict classes and cull classifications that are not with high confidence
        predictions = [(le.inverse_transform([int(pred)])[0], loc)
                       if rec else ("Unknown", loc)
                       for pred, loc, rec in zip(clf.predict(faces_encodings),
                                                 X_faces_loc, is_recognized)]
        print(predictions)
    print()