def face_gen(image_path, face_output):
    # Load the image file into a numpy array
    image = face_recognition_api.load_image_file(image_path)

    # Find all the faces in the image using the default HOG-based model.
    # The CNN model (commented out below) is more accurate but slower unless
    # dlib is compiled with CUDA extensions and an NVIDIA GPU is available.
    # See also: find_faces_in_picture.py
    face_locations = face_recognition_api.face_locations(image)
    # face_locations = face_recognition_api.face_locations(image, number_of_times_to_upsample=0, model="cnn")

    print("I found {} face(s) in this photograph.".format(len(face_locations)))

    if len(face_locations) < 1:
        # No face detected: copy the original image aside for manual review
        outname = face_output.split('/')[-1]
        shutil.copy(image_path, 'trainData2face/zero_face/' + outname)

    for face_location in face_locations:
        # Print the location of each face in this image
        top, right, bottom, left = face_location
        print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}"
              .format(top, left, bottom, right))

        # Crop the face region out of the image and save it
        face_image = image[top:bottom, left:right]
        pil_image = Image.fromarray(face_image)
        pil_image.save(face_output)
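# A minimal usage sketch for face_gen, assuming a hypothetical layout where raw
# training photos sit in 'trainData/' and cropped faces are written to
# 'trainData2face/'. The driver function name, directory names, and extension
# filter are illustrative, not taken from the original code.
import os

def crop_all_faces(src_dir='trainData', dst_dir='trainData2face'):
    # Hypothetical driver: crop every photo in src_dir into dst_dir
    os.makedirs(os.path.join(dst_dir, 'zero_face'), exist_ok=True)
    for fname in os.listdir(src_dir):
        if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
            face_gen(os.path.join(src_dir, fname),
                     os.path.join(dst_dir, fname))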
face_names = []
process_this_frame = True

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition_api.face_locations(small_frame)
            face_encodings = face_recognition_api.face_encodings(small_frame, face_locations)

            face_names = []
            predictions = []
            if len(face_encodings) > 0:
                closest_distances = clf.kneighbors(face_encodings, n_neighbors=1)
                is_recognized = [closest_distances[0][i][0] <= 0.5
                                 for i in range(len(face_locations))]
                # predict classes and cull classifications that are not with high confidence
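# The 0.5 cut-off above relies on the tuple returned by KNeighborsClassifier.kneighbors:
# element [0] holds the distances and element [1] the neighbour indices, one row per
# query encoding. A minimal, self-contained sketch of that structure; random 128-d
# vectors stand in for real face encodings and the threshold value is illustrative.
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.default_rng(0)
known_encodings = rng.normal(size=(6, 128))      # stand-in for encoded training faces
labels = [0, 0, 1, 1, 2, 2]                      # stand-in integer labels

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(known_encodings, labels)

query = rng.normal(size=(2, 128))                # stand-in for faces found in a frame
distances, _ = knn.kneighbors(query, n_neighbors=1)
is_recognized = [distances[i][0] <= 0.5 for i in range(len(query))]
print(distances.shape, is_recognized)            # (2, 1) and a list of booleans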
def Fillattendances():
    sub = tx.get()
    if sub == '':
        err_screen1()
    else:
        df = pd.read_csv("StudentDetails\\StudentDetails.csv")
        video_capture = cv2.VideoCapture(0)

        # Load the trained face classifier and label encoder
        fname = 'classifier.pkl'
        if os.path.isfile(fname):
            with open(fname, 'rb') as f:
                (le, clf) = pickle.load(f)
        else:
            print('\x1b[0;37;43m' + "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
            quit()

        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True

        col_names = ['Enrollment', 'Name', 'Date', 'Time', 'status']
        attendance = pd.DataFrame(columns=col_names)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            while True:
                # Grab a single frame of video
                ret, frame = video_capture.read()

                # Resize frame of video to 1/4 size for faster face recognition processing
                small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

                # Only process every other frame of video to save time
                if process_this_frame:
                    # Find all the faces and face encodings in the current frame of video
                    face_locations = face_recognition_api.face_locations(small_frame)
                    face_encodings = face_recognition_api.face_encodings(small_frame, face_locations)

                    face_names = []
                    predictions = []
                    global Id
                    if len(face_encodings) > 0:
                        closest_distances = clf.kneighbors(face_encodings, n_neighbors=1)
                        is_recognized = [closest_distances[0][i][0] <= 0.5
                                         for i in range(len(face_locations))]

                        global Subject
                        global aa
                        global date
                        global timeStamp
                        Subject = tx.get()
                        ts = time.time()
                        date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                        timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                        status = "P"

                        # Predict classes and cull classifications that are not within the distance threshold
                        predictions = [
                            (le.inverse_transform([int(pred)])[0].title(), loc) if rec else ("Unknown.person", loc)
                            for pred, loc, rec in zip(clf.predict(face_encodings), face_locations, is_recognized)
                        ]

                process_this_frame = not process_this_frame
                reg = 0
                Name = 0

                # Display the results
                for name, (top, right, bottom, left) in predictions:
                    # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                    top *= 4
                    right *= 4
                    bottom *= 4
                    left *= 4

                    # Draw a box around the face
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

                    # Draw a label with a name below the face
                    cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                    font = cv2.FONT_HERSHEY_DUPLEX
                    cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

                    # Labels are stored as "<name>.<enrollment>"
                    reg = os.path.split(name)[-1].split(".")[1]
                    Name = os.path.split(name)[-1].split(".")[0]
                    attendance.loc[len(attendance)] = [reg, Name, date, timeStamp, status]

                # Display the resulting image
                cv2.imshow('Video', frame)
                attendance = attendance.drop_duplicates(['Enrollment'], keep='first')

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        # Release handle to the webcam
        video_capture.release()

        Batch = ty.get()
        Subject = tx.get()
        ts = time.time()
        date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
        timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
        Hour, Minute, Second = timeStamp.split(":")
        fileName = "Attendance/" + Batch + "_" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"

        import pymysql.connections

        # Connect to the database
        try:
            global cursor
            connection = pymysql.connect(host='localhost', user='******', password='', db='attendance')
            cursor = connection.cursor()
        except Exception as e:
            print(e)

        # Fetch every student of the selected branch so absentees can be marked "A"
        sql = "SELECT * FROM students WHERE BRANCH='" + Batch + "'"
        cursor.execute(sql)
        results = cursor.fetchall()
        count = 0
        for row in results:
            no = row[1]
            namee = row[2]
            status = "A"
            attendance.loc[len(attendance)] = [no, namee, '', '', status]

        # Recognized students ("P") were inserted first, so keep='first' preserves their rows
        attendance = attendance.drop_duplicates(subset=["Name"], keep='first')

        # Delete the rows of unrecognized faces
        indexNames = attendance[attendance['Name'] == 'Unknown'].index
        attendance.drop(indexNames, inplace=True)
        print(attendance)

        attendance = attendance.drop_duplicates(subset=["Enrollment"], keep='first')
        attendance.to_csv(fileName, index=True)

        # Create a table for this attendance session
        date_for_DB = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d')
        DB_Table_name = str(Batch + "_" + Subject + "_" + date_for_DB + "_Time_" + Hour + "_" + Minute + "_" + Second)

        try:
            connection = pymysql.connect(host='localhost', user='******', password='', db='automatic')
            cursor = connection.cursor()
        except Exception as e:
            print(e)

        sql = "CREATE TABLE " + DB_Table_name + """
            (ID INT NOT NULL AUTO_INCREMENT,
             ENROLLMENT varchar(100) NOT NULL,
             NAME VARCHAR(50) NOT NULL,
             DATE VARCHAR(20) NOT NULL,
             TIME VARCHAR(20) NOT NULL,
             STATUS VARCHAR(20) NOT NULL,
             PRIMARY KEY (ID));
            """

        # Now enter the attendance in the database
        insert_data = "INSERT INTO " + DB_Table_name + " (ID,ENROLLMENT,NAME,DATE,TIME,STATUS) VALUES (0, %s, %s, %s, %s, %s)"
        VALUES = (str(reg), str(Name), str(date), str(timeStamp), str(status))
        try:
            cursor.execute(sql)                  # create the table
            cursor.execute(insert_data, VALUES)  # insert the attendance row
        except Exception as ex:
            print(ex)

        M = 'Attendance filled Successfully'
        Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold'))
        Notifica.place(x=20, y=250)

        video_capture.release()
        cv2.destroyAllWindows()

        # Show the generated attendance sheet in a simple tkinter grid
        import csv
        import tkinter
        root = tkinter.Tk()
        root.title("Attendance of " + Subject)
        root.configure(background='snow')
        cs = 'D:/project/dev/Attendace managemnt system/' + fileName
        with open(cs, newline="") as file:
            reader = csv.reader(file)
            r = 0
            for col in reader:
                c = 0
                for row in col:
                    label = tkinter.Label(root, width=8, height=1, fg="black",
                                          font=('times', 15, ' bold '),
                                          bg="lawn green", text=row,
                                          relief=tkinter.RIDGE)
                    label.grid(row=r, column=c)
                    c += 1
                r += 1
            root.mainloop()
        print(attendance)
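# The queries above interpolate user-controlled strings (Batch, Subject) directly into
# the SQL text, and only the last recognized student's values are inserted. Below is a
# minimal alternative sketch using pymysql parameter binding plus executemany so every
# row of the attendance DataFrame is stored. The function name is hypothetical, it
# assumes the same 'students' schema and attendance columns used in Fillattendances,
# and it is illustrative rather than a drop-in replacement (table names cannot be
# bound as parameters, so the session table name is still concatenated).
def save_attendance_rows(connection, table_name, attendance_df, batch):
    with connection.cursor() as cur:
        # Values are passed separately, so they are escaped by the driver
        cur.execute("SELECT * FROM students WHERE BRANCH = %s", (batch,))
        _ = cur.fetchall()

        insert_sql = ("INSERT INTO " + table_name +
                      " (ENROLLMENT, NAME, DATE, TIME, STATUS) VALUES (%s, %s, %s, %s, %s)")
        rows = [tuple(map(str, r)) for r in
                attendance_df[['Enrollment', 'Name', 'Date', 'Time', 'status']].itertuples(index=False)]
        cur.executemany(insert_sql, rows)
    connection.commit()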
if os.path.isfile(fname):
    with open(fname, 'rb') as f:
        (le, clf) = pickle.load(f)
else:
    print('\x1b[0;37;43m' + "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
    quit()

for image_path in get_prediction_images(prediction_dir):
    # Print colorful text with the image name
    print('\x1b[6;30;42m' + "=====Predicting faces in '{}'=====".format(image_path) + '\x1b[0m')

    img = face_recognition_api.load_image_file(image_path)
    X_faces_loc = face_recognition_api.face_locations(img)
    faces_encodings = face_recognition_api.face_encodings(img, known_face_locations=X_faces_loc)
    print("Found {} faces in the image".format(len(faces_encodings)))

    closest_distances = clf.kneighbors(faces_encodings, n_neighbors=1)
    is_recognized = [closest_distances[0][i][0] <= 0.5
                     for i in range(len(X_faces_loc))]

    # Predict classes and cull classifications that are not within the distance threshold
    predictions = [(le.inverse_transform([int(pred)])[0].title(), loc) if rec else ("Unknown", loc)
                   for pred, loc, rec in zip(clf.predict(faces_encodings),
                                             X_faces_loc, is_recognized)]
def get_frame(self):
    if self.isFace is False:
        # Load the trained face recogniser classifier and label encoder
        fname = 'classifier.pkl'
        if os.path.isfile(fname):
            with open(fname, 'rb') as f:
                (le, clf) = pickle.load(f)
        else:
            print('\x1b[0;37;43m' + "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
            quit()

        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True

        ret, frame = self.video.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition_api.face_locations(small_frame)
            face_encodings = face_recognition_api.face_encodings(small_frame, face_locations)

            face_names = []
            predictions = []
            if len(face_encodings) > 0:
                closest_distances = clf.kneighbors(face_encodings, n_neighbors=1)
                is_recognized = [closest_distances[0][i][0] <= 0.5
                                 for i in range(len(face_locations))]

                # Predict classes and cull classifications that are not within the distance threshold
                predictions = [
                    (le.inverse_transform([int(pred)])[0].title(), loc) if rec else ("Unknown", loc)
                    for pred, loc, rec in zip(clf.predict(face_encodings), face_locations, is_recognized)
                ]

        process_this_frame = not process_this_frame

        # Display the results
        for name, (top, right, bottom, left) in predictions:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with the ID and name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_COMPLEX
            cv2.putText(frame, 'ID: ' + getFilePositionName(name), (left + 6, bottom - 6),
                        font, 1.0, (255, 255, 255), 1)
            cv2.putText(frame, 'Name: ' + name, (left + 6, bottom + 19),
                        font, 1.0, (255, 255, 255), 1)

            face_name = name
            self.count += 1
            if self.count > 10:
                self.isFace = True

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
    else:
        return self.gesture.get_frame()
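# A sketch of how a get_frame()-style camera class is commonly wired into a Flask
# MJPEG stream; the original wiring is not shown above, so this is an assumption.
# 'VideoCamera' stands for the (unnamed) class that defines get_frame(); the route
# path and boundary string are illustrative.
from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # Yield a multipart stream of JPEG frames produced by camera.get_frame()
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # VideoCamera is a hypothetical name for the class shown above
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')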
def main():
    fname = 'classifier.pkl'
    prediction_dir = './test-images'
    encoding_file_path = './encoded-images-data.csv'

    df = pd.read_csv(encoding_file_path)
    full_data = np.array(df.astype(float).values.tolist())

    # Extract features and labels; remove the id column (0th column)
    X = np.array(full_data[:, 1:-1])
    y = np.array(full_data[:, -1:])

    # Load the trained classifier and label encoder
    if os.path.isfile(fname):
        with open(fname, 'rb') as f:
            (le, clf) = pickle.load(f)
    else:
        print('\x1b[0;37;43m' + "Classifier '{}' does not exist".format(fname) + '\x1b[0m')
        quit()

    for image_path in get_prediction_images(prediction_dir):
        # Print colorful text with the image name
        print('\x1b[6;30;42m' + "=====Predicting faces in '{}'=====".format(image_path) + '\x1b[0m')

        img = face_recognition_api.load_image_file(image_path)
        X_faces_loc = face_recognition_api.face_locations(img)
        faces_encodings = face_recognition_api.face_encodings(img, known_face_locations=X_faces_loc)
        print("Found {} faces in the image".format(len(faces_encodings)))

        closest_distances = clf.kneighbors(faces_encodings, n_neighbors=1)
        is_recognized = [closest_distances[0][i][0] <= 0.5
                         for i in range(len(X_faces_loc))]

        # Predict classes and cull classifications that are not within the distance threshold
        predictions = [(le.inverse_transform([int(pred)])[0], loc) if rec else ("Unknown", loc)
                       for pred, loc, rec in zip(clf.predict(faces_encodings),
                                                 X_faces_loc, is_recognized)]
        print(predictions)
        print()
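# get_prediction_images is used above but not defined in this file. A plausible
# minimal sketch, assuming it simply returns image paths from the test directory;
# the accepted extensions and sorting are assumptions.
import os

def get_prediction_images(prediction_dir):
    exts = ('.jpg', '.jpeg', '.png')
    return [os.path.join(prediction_dir, f)
            for f in sorted(os.listdir(prediction_dir))
            if f.lower().endswith(exts)]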