def test_06_unrecog_valid(self):
    """Test a face against a database that it does not exist in, and the
    graceful return."""
    recogniser_invalid = facerecognition.FaceRecognition(
        self.pickle_file_invalid)
    recognition_invalid = recogniser_invalid.recognise_face()
    self.assertIsNone(recognition_invalid)
def test_05_recog_valid(self):
    """Test a face against a database it does exist in."""
    print("LOOK AT CAMERA for 30 seconds!")
    recogniser_valid = facerecognition.FaceRecognition(
        self.pickle_file_valid)
    recognition_valid = recogniser_valid.recognise_face()
    self.assertEqual(recognition_valid, self.face_valid)
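# --- Illustrative test fixture (assumption, not part of the original file) ---
# The two test methods above reference self.pickle_file_valid,
# self.pickle_file_invalid and self.face_valid, but the class and fixture
# that set them up are not shown. A minimal sketch of what they could look
# like; the file paths and the expected label are placeholders, not values
# taken from the repo:
import unittest
import facerecognition

class TestFaceRecognition(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.pickle_file_valid = "encodings_valid.pickle"      # hypothetical path
        cls.pickle_file_invalid = "encodings_invalid.pickle"  # hypothetical path
        cls.face_valid = "known_person"                       # hypothetical label

if __name__ == '__main__':
    unittest.main()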
def main():
    camid = 1
    cap = cvs.VideoCapture(camid)
    facerecog = facerecognition.FaceRecognition("./models", 0.63)
    fcount = 0
    start = time.time()
    while True:
        sleep(30)
        img = cvs.read()
        if img is None:
            continue
        fcount = fcount + 1
        # global lbs
        lbs = 'Average FPS: ' + str(fcount / (time.time() - start))
        cvs.setLbs(lbs)
        if camid == 1:
            img = cv2.flip(img, 1)
        # img = cv2.resize(img, (112, 112))
        image_char = img.astype(np.uint8).tostring()
        rets = facerecog.getfacepose(img.shape[0], img.shape[1], image_char)
        for ret in rets:
            print('draw bounding box for the face')
            # cvs.infoshow('draw bounding box for the face')
            rect = ret['rect']
            mtcnn = ret['mtcnn']
            for i in range(5):
                cvs.circle(img, (mtcnn[i], mtcnn[5 + i]), 2, (0, 0, 255), 2)
            keypoint = ret['keypoints']
            p1 = (int(rect[0]), int(rect[1]))
            p2 = (int(rect[0] + rect[2]), int(rect[1] + rect[3]))
            cvs.rectangle(img, p1, p2, (0, 255, 0), 3, 1)
            for p in range(0, 106):
                k1 = int(rect[0] + keypoint[p * 2])
                k2 = int(rect[1] + keypoint[p * 2 + 1])
                cv2.circle(img, (k1, k2), 2, (253, 0, 0), 2)
        cvs.imshow(img)
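# --- Illustrative helper (not part of the original file) ---
# In the loop above, getfacepose() returns the five MTCNN landmarks as
# [x0..x4, y0..y4] and the 106 face landmarks as interleaved (x, y) offsets
# relative to the top-left corner of the face rectangle. A small helper that
# turns the 106-point array into absolute pixel coordinates could look like:
def landmarks_to_points(rect, keypoint):
    x0, y0 = int(rect[0]), int(rect[1])
    return [(x0 + int(keypoint[2 * p]), y0 + int(keypoint[2 * p + 1]))
            for p in range(106)]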
def main():
    cap = cvs.VideoCapture(1)
    facerecog = facerecognition.FaceRecognition("./models", 0.73)
    max_none = 0
    # facerecog = facerecognition.FaceRecognition("./models", 0.6)
    while True:
        sleep(30)
        img = cvs.read()
        if img is None:
            continue
        # img = cv2.resize(img, (112, 112))
        image_char = img.astype(np.uint8).tostring()

        msgType, msgName = cvs.getMsg()
        if msgName != '' and msgType == 'add_person':
            ret = facerecog.add_person(msgName, img.shape[0], img.shape[1],
                                       image_char)
            if ret == 0:
                print('add_person succeeded!')
                cvs.setMsg_status(1)
            else:
                print('add_person failed!')
                cvs.setMsg_status(-1)
            continue

        rets = facerecog.recognize(img.shape[0], img.shape[1], image_char)
        print('rets:', rets)
        for ret in rets:
            print('draw bounding box for the face')
            rect = ret['rect']
            p1 = (int(rect[0]), int(rect[1]))
            # rect is (x, y, width, height); use rect[3] for the height
            p2 = (int(rect[0] + rect[2]), int(rect[1] + rect[3]))
            cv2.rectangle(img, p1, p2, (0, 255, 0), 3, 1)
            cv2.putText(img, ret['name'], (int(rect[0]), int(rect[1]) - 30),
                        cv2.FONT_ITALIC, 2, (77, 255, 9), 2)
            cvs.infoshow('your name:' + ret['name'])
        cvs.imshow(img)
def process():
    cap = cvs.VideoCapture(1)
    facerecog = facerecognition.FaceRecognition("./models", 0.73)
    while True:
        sleep(30)
        img = cap.read()
        if img is None:
            continue
        image_char = img.astype(np.uint8).tostring()

        userId = cvs.getLbs()
        if userId != '':
            ret = facerecog.add_person(userId, img.shape[0], img.shape[1],
                                       image_char)
            if ret == 0:
                print('add_person succeeded!')
                # cvs.setMsg_status(1)
            else:
                print('add_person failed!')
            userId = ''
            cvs.setLbs(userId)
            continue

        rets = facerecog.recognize(img.shape[0], img.shape[1], image_char)
        for ret in rets:
            print('draw bounding box for the face')
            rect = ret['rect']
            p1 = (int(rect[0]), int(rect[1]))
            # rect is (x, y, width, height); use rect[3] for the height
            p2 = (int(rect[0] + rect[2]), int(rect[1] + rect[3]))
            # draw the rectangle and the recognised name
            cv2.rectangle(img, p1, p2, (0, 255, 0), 3, 1)
            cv2.putText(img, ret['name'], (int(rect[0]), int(rect[1]) - 30),
                        cv2.FONT_ITALIC, 2, (77, 255, 9), 2)
        cvs.imshow(img)
def main():
    cap = cvs.VideoCapture(1)
    facerecog = facerecognition.FaceRecognition("./models", 0.63)
    max_none = 0
    # facerecog = facerecognition.FaceRecognition("./models", 0.6)
    while True:
        sleep(30)
        img = cvs.read()
        if img is None:
            continue
        img = cv2.flip(img, 1)
        # img = cv2.resize(img, (112, 112))
        image_char = img.astype(np.uint8).tostring()
        rets = facerecog.getfacepose(img.shape[0], img.shape[1], image_char)
        for ret in rets:
            print('draw bounding box for the face')
            rect = ret['rect']
            print(rect)
            mtcnn = ret['mtcnn']
            print(mtcnn)
            for i in range(5):
                cv2.circle(img, (mtcnn[i], mtcnn[5 + i]), 2, (0, 0, 255), 2)
            keypoint = ret['keypoints']
            p1 = (int(rect[0]), int(rect[1]))
            p2 = (int(rect[0] + rect[2]), int(rect[1] + rect[3]))
            cv2.rectangle(img, p1, p2, (0, 255, 0), 3, 1)
            for p in range(0, 106):
                print(p * 2, ' = ', keypoint[p * 2])
                print(p * 2 + 1, ' = ', keypoint[p * 2 + 1])
                k1 = int(rect[0] + keypoint[p * 2])
                k2 = int(rect[1] + keypoint[p * 2 + 1])
                cv2.circle(img, (k1, k2), 2, (253, 0, 0), 2)
        # cv2.putText(img, ret['name'], (20, 50), cv2.FONT_ITALIC, 2, (77, 255, 9), 2)
        cvs.imshow(img)
import numpy as np
import time, os
from multiprocessing import Process, Queue
import struct
import paho.mqtt.client as mqtt
import argparse
import SocketServer
from PIL import Image

import facerecognition  # used below but missing from the original import list

parser = argparse.ArgumentParser()
parser.add_argument('--svr', type=str,
                    help='The IP of the laptop training server')

facerecg = facerecognition.FaceRecognition("./models", 0.63)

HOST = ''
PORT = 21575
ADDR = (HOST, PORT)
bufSize = 4080


def recognize(rawData):
    img = Image.frombytes('RGB', (480, 272), rawData, 'raw', 'RGB;16')
    npimg = np.rot90(np.array(img), -1)
    image_char = npimg.astype(np.uint8).tostring()
    rets = facerecg.recognize(npimg.shape[0], npimg.shape[1], image_char)
    if rets is None or len(rets) == 0:
        # the original snippet is truncated here; returning None when no face
        # is found is an assumed completion
        return None
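# --- Illustrative server wiring (assumption, not part of the original file) ---
# The snippet above defines HOST/PORT/ADDR/bufSize and imports SocketServer,
# but the request handler that feeds rawData into recognize() is not shown.
# A minimal sketch under those assumptions; the class name and the framing
# logic are hypothetical:
class RecognitionHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        # assumed frame size: 480 x 272 pixels at 2 bytes per pixel (RGB;16)
        expected = 480 * 272 * 2
        rawData = b''
        while len(rawData) < expected:
            chunk = self.request.recv(bufSize)
            if not chunk:
                break
            rawData += chunk
        result = recognize(rawData)
        self.request.sendall(str(result))

# server = SocketServer.TCPServer(ADDR, RecognitionHandler)
# server.serve_forever()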
import numpy as np
import facerecognition, cv2
import mobilefacenet  # used by call_cpp below but missing from the original imports


class call_cpp(object):
    def __init__(self, name):
        self.itcom = mobilefacenet.communication(name)

    def run(self, img):
        # image_char was undefined in the original; build it from img here
        image_char = img.astype(np.uint8).tostring()
        ret_list = self.itcom.test(img.shape[0], img.shape[1], image_char)
        gray = np.zeros([img.shape[0], img.shape[1]], np.ubyte)
        c = 0
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                gray[i][j] = ret_list[c]
                c += 1
        return gray

    def run_class(self):
        return self.itcom.ret_box_list()


if __name__ == '__main__':
    facerecog = facerecognition.FaceRecognition("../models/")
    img = cv2.imread('6.png')
    image_char = img.astype(np.uint8).tostring()
    # facerecog.recognize(img.shape[0], img.shape[1], image_char)
    facerecog.add_person("gf", img.shape[0], img.shape[1], image_char)
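# --- Illustrative use of call_cpp (assumption) ---
# call_cpp is not exercised in the __main__ block above; assuming
# mobilefacenet.communication follows the (height, width, raw bytes)
# convention used elsewhere in this repo, it could be driven like this:
#
#     wrapper = call_cpp("mobilefacenet")   # hypothetical instance name
#     gray = wrapper.run(cv2.imread('6.png'))
#     boxes = wrapper.run_class()
#     cv2.imwrite('gray.png', gray)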