Example no. 1
0
    def __init__(self, username='******', threshold=80, update_val=True, tta=False, c=True,
                 scale=0.3, min_face=20, embedding=512, bank_path='media/account/facebank',
                 model_path='Weights/model_final.pth'):
        """Set up camera capture, the MTCNN face detector, the MobileFaceNet
        embedder, and the per-user facebank.

        Args:
            username: account name used to build the facebank path.
            threshold: recognition score threshold.
            update_val: when True, rebuild the facebank from images on disk;
                otherwise load the previously saved embeddings.
            tta: enable test-time augmentation when preparing the facebank.
            c: whether to show the confidence score (stored as ``self.score``).
            scale: frame down-scaling factor used by recognition.
            min_face: minimum face size (pixels) for the detector.
            embedding: MobileFaceNet embedding vector size.
            bank_path: NOTE(review): currently unused — the facebank path is
                derived from ``username`` instead; kept for backward
                compatibility with existing callers.
            model_path: path to the MobileFaceNet checkpoint.
        """
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        # Frame-grabbing thread; it is only created here, not started.
        self.thread = threading.Thread(target=self.update, args=())
        self.flag = True
        # recognition parameters
        self.threshold = threshold
        self.tta = tta
        self.score = c
        self.scale = scale
        self.min_face = min_face
        self.embedding = embedding
        self.facebank = 'media/account/{}/facebank'.format(username)
        self.model_path = model_path
        self.up = update_val
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('using ', self.device)
        self.face_detector = mtcnn_custom(device=self.device,
                                          p_model_path='MTCNN/weights/pnet_Weights',
                                          r_model_path='MTCNN/weights/rnet_Weights',
                                          o_model_path='MTCNN/weights/onet_Weights')
        print('face detector created...')

        # Prepare the pretrained embedding model.
        self.detect_model = MobileFaceNet(self.embedding).to(self.device)  # embedding size is 512 (feature vector)
        # BUG FIX: the checkpoint path was hard-coded to 'Weights/model_final.pth',
        # silently ignoring the ``model_path`` argument; load the configured path.
        self.detect_model.load_state_dict(
            torch.load(self.model_path, map_location=lambda storage, loc: storage))
        print('MobileFaceNet face detection model generated')
        self.detect_model.eval()

        # Face bank: rebuild embeddings from images, or load the cached ones.
        if self.up:
            self.targets, self.names = prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
            print('facebank updated')
        else:
            self.targets, self.names = load_facebank(path=self.facebank)
            print('facebank loaded')
Example no. 2
0
 def update_facebank(self, img_list):
     """Replace each raw uploaded image with its aligned face crop, then
     rebuild the facebank embeddings.

     Args:
         img_list: iterable of image file paths to process in place.
     """
     for img_path in img_list:
         img = cv2.imread(img_path)
         bboxes, landmarks = self.face_detector.detect_all_net(image=img, mini_face=self.min_face)
         faces = Face_alignment(img, default_square=True, landmarks=landmarks)
         # BUG FIX: the original removed the source file BEFORE indexing
         # faces[0], so an image where no face was aligned was deleted and
         # then crashed with IndexError. Skip such images instead.
         if len(faces) == 0:
             print('no aligned face for {}, skipping'.format(img_path))
             continue
         try:
             os.remove(img_path)
         except OSError:
             # Narrowed from a bare except: only file-system errors are expected here.
             print('fail to remove')
         cv2.imwrite(img_path, faces[0])
     self.targets, self.names = prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
     print('new facebank uploaded !!...')
from flask_cors import CORS

# Module-level setup: load the MobileFaceNet embedder and the facebank.
device_0 = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
detect_model = MobileFaceNet(512).to(device_0)  # embedding size is 512 (feature vector)
detect_model.load_state_dict(
    torch.load('Weights/MobileFace_Net',
               map_location=lambda storage, loc: storage))
detect_model.eval()
# NOTE(review): `target`/`name` look redundant with `targets`/`names` below,
# but are kept as module globals in case other modules import them.
target, name = load_facebank(path='facebank')

parser = argparse.ArgumentParser()
parser.add_argument('--miniface', default=10, type=int)
parser.add_argument('--scale', default=2, type=int)
# BUG FIX: `type=bool` is broken for CLI parsing — bool("False") is True, so
# any supplied value enabled the update. A store_true flag is the correct,
# conventional form; `args.update` still defaults to False.
parser.add_argument('--update', default=False, action='store_true')
args = parser.parse_args()

# Either rebuild the facebank from images on disk or load the cached one.
if args.update:
    targets, names = prepare_facebank(detect_model, path='facebank')
    print('facebank updated')
else:
    targets, names = load_facebank(path='facebank')
    print('facebank loaded')


def mod_crop(image, scale=2):
    """Crop *image* so its height and width are divisible by *scale*.

    Args:
        image: 2-D (grayscale) or 3-D (H x W x C) numpy array.
        scale: divisor the output height/width must be a multiple of.

    Returns:
        The top-left crop of *image* with h and w rounded down to multiples
        of *scale*; the channel axis (if any) is preserved.
    """
    h = image.shape[0]
    w = image.shape[1]
    h = h - np.mod(h, scale)
    w = w - np.mod(w, scale)
    if len(image.shape) == 3:
        return image[0:h, 0:w, :]
    # BUG FIX: 2-D (grayscale) input previously fell through and returned
    # None; crop it the same way, just without the channel axis.
    return image[0:h, 0:w]

    # NOTE(review): the block below reads like a standalone face-verification
    # CLI script pasted at function indentation (and it continues past this
    # excerpt) — confirm whether it belongs inside mod_crop at all.
    parser = argparse.ArgumentParser(description='for face verification')
    # Input image and recognition options.
    parser.add_argument('-img', '--img', help='upload image', default='images/Howard.jpg', type=str)
    parser.add_argument('-th','--threshold',help='threshold score to decide identical faces',default=60, type=float)
    parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true", default= False)
    parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true", default= False)
    parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true",default= True )
    args = parser.parse_args()
    # Prefer GPU when available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    detect_model = MobileFaceNet(512).to(device)  # embedding size is 512 (feature vector)
    detect_model.load_state_dict(torch.load('Weights/MobileFace_Net', map_location=lambda storage, loc: storage))
    print('MobileFaceNet face detection model generated')
    detect_model.eval()

    # Either rebuild the facebank from images on disk or load the cached one.
    if args.update:
        targets, names = prepare_facebank(detect_model, path='facebank', tta=args.tta)
        print('facebank updated')
    else:
        targets, names = load_facebank(path='facebank')
        print('facebank loaded')
        # targets: number of candidate x 512

    image = cv2.imread(args.img)

    # Time the MTCNN detection stage.
    t = time.time()

    bboxes, landmarks = create_mtcnn_net(image, 32, device, p_model_path='MTCNN/weights/pnet_Weights',
                                         r_model_path='MTCNN/weights/rnet_Weights',
                                         o_model_path='MTCNN/weights/onet_Weights')

    t1 = time.time() - t