def loader(f_encoding, kfn, kfe):
    # TODO : Add a text log too, in addition to the video logs.
    # temp vars : failed, fold_name, folder, image
    # params    : f_encoding, kfn, kfe
    print('Loading DataSet...')
    failed = 0
    cwd = os.getcwd() + '/faces/'
    for folder in os.listdir(cwd):
        fold_name = folder
        folder = 'faces/' + folder
        if os.path.isdir(folder):
            # print('\tLoading :' + folder)
            for subfolder in os.listdir(folder):
                # print('\t\t' + subfolder)
                path = folder + '/' + subfolder + '/*.jpg'
                for img in glob.glob(path):  # Add all images to the comparison list
                    try:
                        # print('\t\t\t' + img)
                        kfn += [fold_name]
                        image = face_rec.load_image_file(img)
                        f_encoding.append(face_rec.face_encodings(image)[0])  # Append the result
                        # print(len(f_encoding))
                    except Exception:
                        failed += 1
        else:
            # print('\t\t\tNo images found')
            pass
    print('Loaded ' + str(len(f_encoding)) + ' samples')
    print('Failed Loading ' + str(failed) + ' samples')
    # Copying encoded faces from f_encoding to the known face encodings (kfe).
    # Fill the caller's list in place; rebinding the kfe parameter would not reach the caller.
    kfe[:] = f_encoding
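# A minimal usage sketch for loader(), assuming os, glob and face_rec are imported in this
# module as in the other scripts here. The lists are created empty and filled in place by
# the call, since kfe is mutated via slice assignment above.
if __name__ == '__main__':
    f_encoding = []  # raw encodings gathered from disk
    kfn = []         # known face names, one entry per encoding
    kfe = []         # known face encodings used for matching
    loader(f_encoding, kfn, kfe)
    print(len(kfe), 'encodings for', len(set(kfn)), 'people')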
import numpy as np
import face_rec
import cv2
import urllib.request as u

url = 'rtsp://192.168.0.102:8080/h264_ulaw.sdp'
video = cv2.VideoCapture(url)

# Loading sample pictures
f1 = face_rec.load_image_file('faces/modi.jpg')
f3 = face_rec.load_image_file('faces/trump.jpg')
f5 = face_rec.load_image_file('faces/kamal.jpg')

# Learn how to recognise them
f1_encoding = face_rec.face_encodings(f1)[0]
f3_encoding = face_rec.face_encodings(f3)[0]
f5_encoding = face_rec.face_encodings(f5)[0]

# Array of known encodings
kfe = [
    f1_encoding,
    f3_encoding,
    f5_encoding
]
import numpy as np
import face_rec
import cv2
import urllib.request as u

url = 'rtsp://192.168.88.129:8080/h264_ulaw.sdp'
video = cv2.VideoCapture(url)

# Loading sample pictures
f1 = face_rec.load_image_file('faces/modi.jpg')
f2 = face_rec.load_image_file('faces/nagesh.jpg')
f3 = face_rec.load_image_file('faces/trump.jpg')
f4 = face_rec.load_image_file('faces/yb.jpg')
f5 = face_rec.load_image_file('faces/kamal.jpg')
f6 = face_rec.load_image_file('faces/swarna.png')
f7 = face_rec.load_image_file('faces/atchaya.png')
f8 = face_rec.load_image_file('faces/js.png')
f9 = face_rec.load_image_file('faces/rethanya.png')
f10 = face_rec.load_image_file('faces/swetha.png')
f11 = face_rec.load_image_file('faces/thamarai.png')
f12 = face_rec.load_image_file('faces/hariharan.png')
f13 = face_rec.load_image_file('faces/priyesh.png')
f14 = face_rec.load_image_file('faces/thilak.png')
f15 = face_rec.load_image_file('faces/veno.png')
f16 = face_rec.load_image_file('faces/jeyarani.png')

# Learn how to recognise them
f1_encoding = face_rec.face_encodings(f1)[0]
f2_encoding = face_rec.face_encodings(f2)[0]
f3_encoding = face_rec.face_encodings(f3)[0]
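# The per-file boilerplate above could equally be built in a loop. A sketch, assuming the
# same sample files and that the results feed the kfe / kfn lists used by the other scripts
# in this project (the person labels are illustrative):
sample_files = {
    'Modi': 'faces/modi.jpg',
    'Trump': 'faces/trump.jpg',
    'Kamal': 'faces/kamal.jpg',
    # ... remaining faces/*.png samples
}
kfe, kfn = [], []
for person, path in sample_files.items():
    img = face_rec.load_image_file(path)
    encs = face_rec.face_encodings(img)
    if encs:  # skip images where no face is found instead of raising IndexError
        kfe.append(encs[0])
        kfn.append(person)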
failed = 0
kfn = []         # known face names list
f_encoding = []  # face encodings collected from the dataset
cwd = os.getcwd() + '/faces/'
for folder in os.listdir(cwd):
    fold_name = folder
    folder = 'faces/' + folder
    if os.path.isdir(folder):
        print('\tLoading :' + folder)
        for subfolder in os.listdir(folder):
            print('\t\t' + subfolder)
            path = folder + '/' + subfolder + '/*.jpg'
            for img in glob.glob(path):  # Add all images to the comparison list
                try:
                    print('\t\t\t' + img)
                    kfn += [fold_name]
                    image = face_rec.load_image_file(img)
                    f_encoding.append(face_rec.face_encodings(image)[0])  # Append the result
                    # print(len(f_encoding))
                except Exception:
                    failed += 1
    else:
        print('\t\t\tNo images found')

print('Loaded ' + str(len(f_encoding)) + ' samples')
print('Failed Loading ' + str(failed) + ' samples')

# Copying encoded faces from f_encoding to the known face encodings (kfe)
kfe = f_encoding.copy()

# Loading known face names (kfn) from the image names residing in the folder
# kfn = []
# parent_dir = 'faces/'
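# For reference, the walk above expects a layout along these lines (the person and file
# names are illustrative only):
#
#   faces/
#       modi/
#           set1/
#               img001.jpg
#               img002.jpg
#       trump/
#           set1/
#               img001.jpg
#
# i.e. faces/<person>/<subfolder>/*.jpg, where <person> becomes the entry added to kfn.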
def init(kfn, kfe):
    # temp vars : names, flag, floc, fe, ctr, prev_name, video, width, height, vname, out,
    #             rframe, rgbrframe, matched_faces, fdist, best_match, name, gray
    names = []
    flag = True
    floc = []
    fe = []
    ctr = 0
    name = " "
    prev_name = " "
    print('Press " e " | " r " to enroll..')
    video = cv2.VideoCapture(0)  # start cam
    width = int(video.get(3))
    height = int(video.get(4))
    vname = 'output.avi'
    out = cv2.VideoWriter(vname, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (width, height))
    while (1):
        ret, frame = video.read()  # grab the feed frame by frame
        rframe = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # not needed, just to make the process faster
        rgbrframe = cv2.cvtColor(rframe, cv2.COLOR_BGR2RGB)  # cv2 uses BGR whereas face_rec uses RGB, so convert
        out.write(frame)  # write to the video log
        if flag:
            floc = face_rec.face_locations(rgbrframe)  # grab face locations from the frame
            fe = face_rec.face_encodings(rgbrframe, floc)  # grab face encodings from the frame
            for fenc in fe:
                matched_faces = face_rec.compare_faces(kfe, fenc)
                fdist = face_rec.face_distance(kfe, fenc)
                best_match = np.argmin(fdist)
                if matched_faces[best_match]:
                    try:
                        name = kfn[best_match]
                        if prev_name != name:
                            print(name + ' - ' + str(datetime.datetime.now()))
                            prev_name = name
                    except Exception:
                        pass
                else:
                    name = 'Unknown'
                    if prev_name != name:
                        print('\t!!! Security Alert !!!\n\t\tDetected ' + name + ' - ' + str(datetime.datetime.now()))
                        prev_name = name
                        gray = cv2.cvtColor(rframe, cv2.COLOR_BGR2GRAY)  # convert the unknown-face frame to grayscale
                        faceCascade = cv2.CascadeClassifier(
                            cv2.data.haarcascades + "haarcascade_frontalface_default.xml")  # Haar cascade for frontal faces
                        faces = faceCascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3, minSize=(50, 50))
                        if len(faces):  # faces may contain multiple unknown faces in a single frame
                            for (x, y, w, h) in faces:
                                roi_color = rframe[y:y + 240, x:x + 360]  # crop the unknown face
                                ctr = ctr + 1
                                path = 'unknownDetected/'
                                cv2.imwrite(os.path.join(path, str(ctr) + 'Unknown_Face.jpg'), roi_color)  # save the cropped unknown face to the unknownDetected folder
                                filename = path + str(ctr) + 'Unknown_Face.jpg'
                                f1 = face_rec.load_image_file(filename)
                                try:
                                    f1_encoding = face_rec.face_encodings(f1)[0]  # encode the unknown face
                                    kfe.append(f1_encoding)
                                    sname = 'Suspect' + str(ctr)  # detected unknown face enrolled as a Suspect :)
                                    kfn.append(sname)
                                except Exception:
                                    pass
                r1 = r.randint(0, 100)
                if r1 % 2 == 0:  # log a snapshot for roughly half of the detections
                    s.sframes(name, frame)
        flag = not flag
        names += [name]
        cv2.imshow('Video', frame)  # show frames as they are processed
        print('Here', *names)
        # No markings are drawn on the live frames, so as to reduce flickering of the screen/video feed.
        # Instead, as on generic systems, results are shown on the terminal/console.
        # TODO : Show in a text area when adding the Flask front-end.
        key = cv2.waitKey(1) & 0xFF  # read the key once so a press is not missed between checks
        if key == ord('q'):
            out.release()
            video.release()
            cv2.destroyAllWindows()
            break
        if key == ord('e') or key == ord('r'):
            print('Begin Enrollment....')
            out.release()
            video.release()
            cv2.destroyAllWindows()
            e.enroll()
            s.store(names)
            init(kfn, kfe)
    return
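# A minimal sketch of how loader() and init() above might be wired together. The module-level
# imports this assumes (os, glob, cv2, numpy as np, datetime, random as r, face_rec, and the
# project's own e / s helper modules) are not shown in this excerpt:
if __name__ == '__main__':
    f_encoding, kfn, kfe = [], [], []
    loader(f_encoding, kfn, kfe)  # build the known-face lists from faces/
    init(kfn, kfe)                # then start the live recognition loop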
import numpy as np
import face_rec
import cv2
import urllib.request as u

url = 'rtsp://192.168.0.102:8080/h264_ulaw.sdp'
video = cv2.VideoCapture(url)

# Loading the sample picture
f1 = face_rec.load_image_file(image)

# Learn how to recognise it
f1_encoding = face_rec.face_encodings(f1)[0]

# Array of known encodings
kfe = []
# Array of known face names
kfn = []

names = []
flag = True
floc = []
fe = []
count = 0

while (1):
    ret, frame = video.read()  # grab the feed frame by frame
    rframe = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # not needed,
def findout(image, iname):
    print(image)
    url = 'rtsp://192.168.43.1:8080/h264_ulaw.sdp'
    # url = 'http://192.168.43.1:8080/video'
    video = cv2.VideoCapture(url)

    # Loading the sample picture
    f1 = face_rec.load_image_file(image)
    # Learn how to recognise it
    f1_encoding = face_rec.face_encodings(f1)[0]

    # Array of known encodings
    kfe = [f1_encoding]
    # Array of known face names
    kfn = [iname]

    names = []
    flag = True
    floc = []
    fe = []
    count = 0
    while (1):
        ret, frame = video.read()  # grab the feed frame by frame
        rframe = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)  # not needed, just to make the process faster
        rgbrframe = cv2.cvtColor(rframe, cv2.COLOR_BGR2RGB)  # cv2 uses BGR whereas face_rec uses RGB, so convert
        if flag:
            floc = face_rec.face_locations(rgbrframe)  # grab face locations from the frame
            fe = face_rec.face_encodings(rgbrframe, floc)  # grab face encodings from the frame
            names = []
            for fenc in fe:
                matched_faces = face_rec.compare_faces(kfe, fenc)
                name = 'Unknown'
                fdist = face_rec.face_distance(kfe, fenc)
                best_match = np.argmin(fdist)
                if matched_faces[best_match]:
                    name = kfn[best_match]
                names.append(name)
        flag = not flag

        # Display the results
        for (top, right, bottom, left), name in zip(floc, names):
            top *= 4  # scale the coordinates back up, since the frame was resized by 0.25
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 4)  # draw a box around the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 0), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)  # label the face

        h, w, l = frame.shape
        new_h = int(h / 2)
        new_w = int(w / 2)
        rzframe = cv2.resize(frame, (new_w, new_h))
        cv2.imshow('Cam_feed', rzframe)
        count += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print('processed ', count, 'frames')
            video.release()
            cv2.destroyAllWindows()
            break
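# Example call; the image path and label are placeholders. findout() blocks until 'q' is
# pressed in the Cam_feed window.
if __name__ == '__main__':
    findout('faces/modi.jpg', 'Modi')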