def execfun(file, tempDir):
    """Verify the iris image at `file` against the templates in `tempDir`.

    Prints progress, the match outcome, and the elapsed time; relies on the
    module-level `args` namespace for the matching threshold.
    """
    args.file = file
    args.temp_dir = tempDir

    # Feature extraction (timed from here).
    t0 = time()
    print('>>> Start verifying {}\n'.format(args.file))
    template, mask, file = extractFeature(args.file)

    # Match against the stored templates and report.
    result = matching(template, mask, args.temp_dir, args.thres)
    if result == -1:
        print('>>> No registered sample.')
    elif result == 0:
        print('>>> No sample matched.')
    else:
        print('>>> {} samples matched (descending reliability):'.format(
            len(result)))
        for match in result:
            print("\t", match)

    # Elapsed wall-clock time.
    print('\n>>> Verification time: {} [s]\n'.format(time() - t0))
def clicked_verify():
    """GUI callback: verify the eye image named in the `txt2` entry widget
    and show the result in a popup.

    NOTE(review): `parse_args()` inside a button callback reads `sys.argv`
    at click time — presumably intentional here; confirm.
    """
    # ------------------------------------------------------------------
    # Argument parsing
    # ------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", type=str,
                        help="Path to the file that you want to verify.")
    parser.add_argument(
        "--temp_dir", type=str,
        default="D:\\GHCI_PROJECT\\Iris-Recognition-master\\python\\template",
        help="Path to the directory containing templates.")
    parser.add_argument("--thres", type=float, default=0.38,
                        help="Threshold for matching.")
    args = parser.parse_args()

    # ------------------------------------------------------------------
    # Execution
    # ------------------------------------------------------------------
    t_start = time()
    # The GUI entry overrides any --file argument.
    args.file = "D:\\GHCI_PROJECT\\eyes\\" + str(txt2.get()) + ".jpg"
    print('>>> Start verifying {}\n'.format(args.file))
    template, mask, file = extractFeature(args.file)

    # Match and surface the outcome both on stdout and via popup.
    result = matching(template, mask, args.temp_dir, args.thres)
    if result == -1:
        print('>>> No registered sample.')
        popupmsg("Voter not registered.")
    elif result == 0:
        print('>>> No sample matched.')
        popupmsg("Voter not matched.")
    else:
        print('>>> {} samples matched (descending reliability):'.format(
            len(result)))
        for res in result:
            print("\t", res)
        # Best match first; its name string encodes the voter id.
        # NOTE(review): id is shown as result_temp[0:2] but looked up with
        # result_temp[0] — verify the id_name key width.
        result_temp = str(result[0])
        popupmsg("Voter ID = " + str(result_temp[0:2]) + "\n" +
                 "Voter Name = " + str(id_name[result_temp[0]]))

    # Elapsed wall-clock time.
    print('\n>>> Verification time: {} [s]\n'.format(time() - t_start))
def pool_verification(conn_disp, conn_loc, ft_path, threshold):
    """Verification worker loop.

    Receives face locations (from the location pool) and frames (from the
    display pool), encodes each face region, matches it against templates
    under `ft_path`, and sends the matched names back to the display pool.
    Returns when the display pool sends the string "break".
    """
    while True:
        frame = []
        locations = []
        # Pull the newest locations and the corresponding frame, if ready.
        if conn_loc.poll():
            locations = conn_loc.recv()
            frame = conn_disp.recv()

        # Display pool may request shutdown.
        if conn_disp.poll():
            msg = conn_disp.recv()
            if isinstance(msg, str) and msg == "break":
                return

        # Encode every detected face region.
        encodings = []
        if len(locations) and len(frame):
            for loc in locations:
                x, y, w, h = loc[0], loc[1], loc[2], loc[3]
                region = frame[x:w + 1, h:y + 1]
                code = face_encodings(region)
                if len(code):
                    encodings.append(code)

        # Compare each encoding to the template database.
        matched_names = []
        matched_faces = []
        for code in encodings:
            hit, name, face_img = matching(code, ft_path, threshold)
            if hit:
                matched_names.append(name)
                matched_faces.append(face_img)

        # Push results; drain any stale message first so send() stays fresh.
        if len(matched_names):
            if conn_disp.poll():
                conn_disp.recv()
            conn_disp.send(matched_names)
## ## #a='/home/pi/Downloads/Iris-RecextractFeatureognition-master/python/img'+d+'jpg' ## camera.capture('/home/pi/Downloads/img1.jpg') file='/home/pi/Downloads/img1.jpg' start = time() print(file) template, mask, file = extractFeature(file) #mat1 = scipy.io.loadmat('/home/pi/Downloads/Iris-Recognition-master/python/templates/data7/img7.jpg.mat') #mat2 = scipy.io.loadmat('/home/pi/Downloads/Iris-Recognition-master/python/templates/data7/img5.jpg.mat') #c=mat1['template'] #b=mat2['template'] #c1=mat1['mask'] #b1=mat2['mask'] # Matching result = matching(template, mask, args.temp_dir, args.thres) ##cv2.imshow('segment9',imageiris) ##cv2.waitKey(0) ##cv2.imshow('normalize',image) ##cv2.waitKey(0) ##cv2.imshow('encode1',template) ##cv2.waitKey(0) ##cv2.imshow('encode2',mask) ##cv2.waitKey(0) ##cv2.imshow('enco2',im) ##cv2.waitKey(0) ##cv2.imshow('ene2',im) ##cv2.waitKey(0) if result == -1: print('>>> No registered sample.')
# Live-camera face recognition loop: grab frames, locate faces, encode and
# match each one against the templates under `ft_path`; ESC terminates.
ft_path = "template/"
threshold = 0.4
cap = cv2.VideoCapture(0)
while True:
    # Detect faces in the current frame and show the annotated view.
    ret, img = cap.read()
    face_locs = face_locations(img)
    img_loc, corner1, corner2 = draw_face_locations(img, face_locs)
    cv2.imshow("Facial Recognition System", img_loc)

    # Encode and match every detected face (only when something was drawn).
    if corner1 is not None:
        for face_loc in face_locs:
            x, y, w, h = face_loc[0], face_loc[1], face_loc[2], face_loc[3]
            face_code = face_encodings(img[x:w + 1, h:y + 1])
            flg, name, _ = matching(face_code, ft_path, threshold)
            if flg:
                print(name, "is recognized")

    # ESC key (27) exits the loop.
    k = cv2.waitKey(5) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
# NOTE(review): this fragment begins mid `if`/`elif` chain — the opening
# `if` (and the indent level of the leading exit()) is outside this view,
# so the fragment is not runnable standalone; indentation below is a guess.
    exit()
elif len(argv) == 1:
    # No image argument given: fall back to the first database image.
    filename = '%s001_1_1.jpg' % image_database_path
else:
    print(">>>Wrong syntax!\n")
    exit()

# Extract feature (timed from here).
start = time()
print('>>> Start verifying {}'.format(filename))
template, mask, filename = extractFeature(filename)

# Matching — 0.42 is the hard-coded distance threshold here; presumably
# tuned for this database, TODO confirm.
id_acc = matching(template, mask, 0.42)
if id_acc == -1:
    print('>>> Error!')
elif id_acc == 0:
    print('>>> No matched!')
else:
    print('>>> ID {} is matched!'.format(str(id_acc)))

# Time measure
end = time()
print('>>> Verification time: {} [s]\n'.format(end - start))
def verify():
    """Capture an iris image on the Pi camera, drive GUI/serial/GPIO
    feedback, then run feature extraction and template matching.

    NOTE(review): the authenticated / not-authenticated feedback below is
    keyed on the operator's console input `n` ("e" / "b"), NOT on the
    matching result — the real matching runs afterwards and its result is
    unused (the result-handling block is commented out). Looks like demo
    wiring; confirm before relying on this for actual authentication.
    """
    camera = PiCamera()
    #------------------------------------------------------------------------------
    # Argument parsing
    #------------------------------------------------------------------------------
    parser = argparse.ArgumentParser()
    ##parser.add_argument("--file", type=str,
    ##                    help="Path to the file that you want to verify.")
    parser.add_argument("--temp_dir", type=str, default="./templates/",
                        help="Path to the directory containing templates.")
    parser.add_argument("--thres", type=float, default=0.38,
                        help="Threshold for matching.")
    args = parser.parse_args()

    ##-----------------------------------------------------------------------------
    ## Execution
    ##-----------------------------------------------------------------------------
    # Extract feature: let the operator frame the eye during a 20 s preview,
    # then capture to a fixed path.
    camera.start_preview()
    camera.brightness = 60
    sleep(20)
    camera.stop_preview()
    n = input("captur")
    #a='/home/pi/Downloads/Iris-RecextractFeatureognition-master/python/img'+d+'jpg'
    camera.capture('/home/pi/Downloads/img1.jpg')
    file = '/home/pi/Downloads/img1.jpg'
    # Show the captured path in the E2 entry widget.
    E2.delete(0, 'end')
    E2.insert(0, file)
    start = time()
    print(file)
    # Random pause — presumably simulates processing time for the demo;
    # TODO confirm.
    b = random.uniform(10.34, 40.78)
    sleep(b)
    if (n == "e"):
        # Operator typed "e": report success on GUI, serial link and GPIO 16.
        E1.delete(0, 'end')
        E1.insert(0, ' sample matched.')
        ser.write("<Authenticated>".encode())
        gpio.output(16, True)
        sound = mixer.Sound('/home/pi/Downloads/authenticated.wav')
        sound.play()
        E2.delete(0, 'end')
        E2.insert(0, b)
        print("Authenticated")
    if (n == "b"):
        # Operator typed "b": report failure.
        gpio.output(16, False)
        E1.delete(0, 'end')
        E1.insert(0, 'No sample matched.')
        sound = mixer.Sound('/home/pi/Downloads/not authenticated.wav')
        sound.play()
        gpio.output(16, False)
        sleep(10)
    # Actual feature extraction and matching (result currently unused —
    # the handling branch below is commented out).
    template, mask, file = extractFeature(file)
    #mat1 = scipy.io.loadmat('/home/pi/Downloads/Iris-Recognition-master/python/templates/data7/img7.jpg.mat')
    #mat2 = scipy.io.loadmat('/home/pi/Downloads/Iris-Recognition-master/python/templates/data7/img5.jpg.mat')
    #c=mat1['template']
    #b=mat2['template']
    #c1=mat1['mask']
    #b1=mat2['mask']
    # Matching
    result = matching(template, mask, args.temp_dir,
                      args.thres)
##    if result == -1:
##        print('>>> No registered sample.')
##        E1.delete(0,'end')
##        E1.insert(0,'FINISHED!!!')
##        ser.write("<Not Authenticated>".encode())
##        gpio.output(16,False)
##
##    elif result == 0:
##        ser.write("<Not Authenticated>".encode())
##        print('>>> No sample matched.')
##        E1.delete(0,'end')
##        E1.insert(0,'No sample matched.')
##        sound = mixer.Sound('/home/pi/Downloads/not authenticated.wav')
##        sound.play()
##        gpio.output(16,False)
##
##    else:
##        print('>>> {} samples matched (descending reliability):'.format(len(result)))
##        for res in result:
##            print("\t", res)
##        E1.delete(0,'end')
##        E1.insert(0,' sample matched.')
##        ser.write("<Authenticated>".encode())
##        gpio.output(16,True)
##        sound = mixer.Sound('/home/pi/Downloads/authenticated.wav')
##        sound.play()
    # Time measure
    end = time()
##    print('\n>>> Verification time: {} [s]\n'.format(end - start))
##    E2.delete(0,'end')
##    E2.insert(0,end-start)
    sleep(10)
    # Leave the GPIO line low on exit.
    gpio.output(16, False)