Code Example #1
File: views.py Project: tanpo-1776017488/-
    def __init__(self,username='******',threshold=80,update_val=True,tta=False,c=True,scale=0.3,min_face=20,embedding=512,bank_path='media/account/facebank',model_path='Weights/model_final.pth'):
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        self.thread=threading.Thread(target=self.update, args=())
        self.flag=True
        # recognition parameter
        self.threshold=threshold
        self.tta=tta
        self.score=c
        self.scale=scale
        self.min_face=min_face
        self.embedding=embedding
        self.facebank='media/account/{}/facebank'.format(username)
        self.model_path=model_path
        self.up=update_val
        self.device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('using ',self.device)
        self.face_detector=mtcnn_custom(device=self.device,p_model_path='MTCNN/weights/pnet_Weights',r_model_path='MTCNN/weights/rnet_Weights',o_model_path='MTCNN/weights/onet_Weights')
        print('face detector created...')

        # prepare pretrained model
        self.detect_model = MobileFaceNet(self.embedding).to(self.device)  # embedding size is 512 (feature vector)
        # self.check_point=torch.load('Weights/model_final_t.pth',map_location=lambda storage, loc: storage)
        # self.detect_model.load_state_dict(self.check_point['model_state_dict'])
        self.detect_model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage))  # use the model_path argument rather than a hard-coded path
        print('MobileFaceNet face detection model generated')
        self.detect_model.eval()

        #face bank update
        if self.up:
            self.targets, self.names = prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
            print('facebank updated')
        else:
            self.targets, self.names = load_facebank(path=self.facebank)
            print('facebank loaded')
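
A quick usage sketch for this constructor. The account name below is hypothetical; per the names[results[i]+1] indexing in get_frame further below, names[0] is the 'Unknown' sentinel that the facebank helpers place before the enrolled identities, and targets holds one 512-d embedding per identity:

cam = VideoCamera(username='alice')  # hypothetical account name
print(cam.targets.shape)             # number of enrolled identities x 512
print(cam.names[0])                  # 'Unknown' sentinel matched by result index -1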
Code Example #2
def get_detect_model():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    detect_model = MobileFaceNet(512).to(
        device)  # embedding size is 512 (feature vector)
    detect_model.load_state_dict(
        torch.load('Weights/MobileFace_Net.pt',
                   map_location=lambda storage, loc: storage))
    print('MobileFaceNet face detection model generated')
    detect_model.eval()

    return detect_model
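
A small usage sketch for get_detect_model. The 112x112 aligned-crop size and the 0.5/0.5 normalization follow the transform used elsewhere in these listings; the file name is a placeholder:

import cv2
import torch
from torchvision import transforms as trans

model = get_detect_model()
device = next(model.parameters()).device
transform = trans.Compose([trans.ToTensor(),
                           trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
face = cv2.imread('aligned_face.jpg')  # placeholder: a 112x112 aligned face crop
with torch.no_grad():
    embedding = model(transform(face).unsqueeze(0).to(device))
print(embedding.shape)  # torch.Size([1, 512])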
Code Example #3
from flask_script import Manager, Server
from PIL import Image, ImageDraw, ImageFont
from torchvision import transforms as trans
import torch
import argparse
import sys
import os
import io
import json
from torchvision import models
from flask import Flask, jsonify, request
from flask_cors import CORS
from face_model import MobileFaceNet                    # needed below; module per the listings in this document
from facebank import load_facebank, prepare_facebank   # needed below; module per the facebank.py listing

device_0 = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
detect_model = MobileFaceNet(512).to(device_0)
detect_model.load_state_dict(
    torch.load('Weights/MobileFace_Net',
               map_location=lambda storage, loc: storage))
detect_model.eval()
parser = argparse.ArgumentParser()
parser.add_argument('--miniface', default=10, type=int)
parser.add_argument('--scale', default=2, type=int)
parser.add_argument('--update', action='store_true')  # type=bool would treat any non-empty string as True
args = parser.parse_args()
if args.update:
    targets, names = prepare_facebank(detect_model, path='facebank')
    print('facebank updated')
else:
    targets, names = load_facebank(path='facebank')
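
The snippet above only prepares the model and facebank. A minimal sketch of the kind of endpoint it usually serves follows; the app object, route path, and response shape are assumptions, not part of the original listing:

app = Flask(__name__)
CORS(app)

@app.route('/recognize', methods=['POST'])
def recognize():
    # placeholder pipeline: decode the upload, then detect, align, embed with
    # detect_model, and match against targets as in the views.py listing below
    img = Image.open(io.BytesIO(request.files['file'].read()))
    return jsonify({'names': []})  # placeholder response

if __name__ == '__main__':
    app.run()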
Code Example #4
File: facebank.py Project: tanpo-1776017488/-
    embeddings = torch.cat(embeddings)
    names = np.array(names)
    torch.save(embeddings, os.path.join(path, 'facebank.pth'))
    np.save(os.path.join(path, 'names'), names)

    return embeddings, names


def load_facebank(path='facebank'):
    data_path = Path(path)
    embeddings = torch.load(data_path / 'facebank.pth')
    names = np.load(data_path / 'names.npy')
    return embeddings, names


if __name__ == '__main__':

    # device was not defined in this excerpt; reuse the choice made elsewhere
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    detect_model = MobileFaceNet(512).to(
        device)  # embedding size is 512 (feature vector)
    detect_model.load_state_dict(
        torch.load('Weights/MobileFace_Net',
                   map_location=lambda storage, loc: storage))
    print('MobileFaceNet face detection model generated')
    detect_model.eval()

    embeddings, names = prepare_facebank(detect_model,
                                         path='facebank',
                                         tta=True)
    print(embeddings.shape)
    print(names)
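
A round-trip sketch of the helpers above with dummy data, showing the on-disk pairing of facebank.pth and names.npy. The demo directory is a placeholder, and the leading 'Unknown' entry mirrors the names[results[i]+1] indexing used in the views.py listings:

import os
import numpy as np
import torch

os.makedirs('facebank_demo', exist_ok=True)
dummy_embeddings = torch.randn(2, 512)               # one 512-d row per identity
dummy_names = np.array(['Unknown', 'alice', 'bob'])  # sentinel + one name per identity
torch.save(dummy_embeddings, os.path.join('facebank_demo', 'facebank.pth'))
np.save(os.path.join('facebank_demo', 'names'), dummy_names)  # np.save appends .npy
embeddings, names = load_facebank(path='facebank_demo')
assert embeddings.shape == (2, 512) and len(names) == 3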
Code Example #5
File: views.py Project: tanpo-1776017488/-
class VideoCamera(object):
    def __init__(self,username='******',threshold=80,update_val=True,tta=False,c=True,scale=0.3,min_face=20,embedding=512,bank_path='media/account/facebank',model_path='Weights/model_final.pth'):
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        self.thread=threading.Thread(target=self.update, args=())
        self.flag=True
        # recognition parameter
        self.threshold=threshold
        self.tta=tta
        self.score=c
        self.scale=scale
        self.min_face=min_face
        self.embedding=embedding
        self.facebank='media/account/{}/facebank'.format(username)
        self.model_path=model_path
        self.up=update_val
        self.device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('using ',self.device)
        self.face_detector=mtcnn_custom(device=self.device,p_model_path='MTCNN/weights/pnet_Weights',r_model_path='MTCNN/weights/rnet_Weights',o_model_path='MTCNN/weights/onet_Weights')
        print('face detector created...')

        # prepare pretrained model
        self.detect_model = MobileFaceNet(self.embedding).to(self.device)  # embedding size is 512 (feature vector)
        # self.check_point=torch.load('Weights/model_final_t.pth',map_location=lambda storage, loc: storage)
        # self.detect_model.load_state_dict(self.check_point['model_state_dict'])
        self.detect_model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage))  # use the model_path argument rather than a hard-coded path
        print('MobileFaceNet face detection model generated')
        self.detect_model.eval()

        #face bank update
        if self.up:
            self.targets, self.names = prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
            print('facebank updated')
        else:
            self.targets, self.names = load_facebank(path=self.facebank)
            print('facebank loaded')
            # targets: number of candidates x 512
        

    def __del__(self):
        self.video.release()
    
    def update_facebank(self,img_list):
        for img_path in img_list:
            
            img=cv2.imread(img_path)
            bboxes,landmarks=self.face_detector.detect_all_net(image=img,mini_face=self.min_face)
            faces= Face_alignment(img,default_square=True,landmarks=landmarks)
            try:
                os.remove(img_path)
            except OSError:
                print('failed to remove', img_path)
            cv2.imwrite(img_path,faces[0])
        self.targets,self.names=prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
        print('new facebank uploaded')

    def get_frame(self):  # face detection and recognition happen here

        frame = self.frame  # latest image grabbed by the update thread

        if frame is not None and self.flag is True:
            try:
                start_time = time.time()
                small = resize_image(frame, self.scale)  # shrinking the input speeds up detection
                # bboxes, landmarks = create_mtcnn_net(small, self.min_face, self.device, p_model_path='MTCNN/weights/pnet_Weights',
                #                                             r_model_path='MTCNN/weights/rnet_Weights',
                #                                             o_model_path='MTCNN/weights/onet_Weights')

                bboxes, landmarks = self.face_detector.detect_all_net(image=small, mini_face=self.min_face)

                if len(bboxes) > 0:  # comparing an array to [] directly is ambiguous
                    bboxes = bboxes / self.scale  # map detections back to full-resolution coordinates
                    landmarks = landmarks / self.scale

                faces = Face_alignment(frame, default_square=True, landmarks=landmarks)

                

                embs=[]
                test_transform = trans.Compose([
                                            trans.ToTensor(),
                                            trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

                for img in faces:
                    if self.tta:
                        mirror = cv2.flip(img,1)
                        emb = self.detect_model(test_transform(img).to(self.device).unsqueeze(0))
                        emb_mirror = self.detect_model(test_transform(mirror).to(self.device).unsqueeze(0))
                        embs.append(l2_norm(emb + emb_mirror))
                    else:
                        embs.append(self.detect_model(test_transform(img).to(self.device).unsqueeze(0)))
                    

                source_embs = torch.cat(embs)
                diff = source_embs.unsqueeze(-1) - self.targets.transpose(1, 0).unsqueeze(0)  # probes x 512 x identities
                dist = torch.sum(torch.pow(diff, 2), dim=1)  # number of detected faces x number of target faces
                minimum, min_idx = torch.min(dist, dim=1)  # min distance and its index for each detected face
                min_idx[minimum > ((self.threshold - 156) / (-80))] = -1  # if no match, set idx to -1
                score = minimum
                results = min_idx
                score_100 = torch.clamp(score * -80 + 156, 0, 100)
                FPS=1.0/(time.time()-start_time)
                
                cv2.putText(frame,'FPS : {:.1f}'.format(FPS),(10,15),cv2.FONT_HERSHEY_DUPLEX,0.75,(255,0,255))
                for i, b in enumerate(bboxes):
                    b = b.astype(int)  # uint32 would wrap around on negative coordinates
                    cv2.rectangle(frame, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 1)
                    try:
                        if self.names[results[i] + 1] == 'Unknown':  # mosaic (blur) unmatched faces
                            face_region = frame[b[1]:b[3], b[0]:b[2]]
                            face_region = cv2.blur(face_region, (30, 30))
                            frame[b[1]:b[3], b[0]:b[2]] = face_region

                    except IndexError:
                        pass
                    # development version
                    # if self.score:
                    #     cv2.putText(frame,self.names[results[i]+1]+' score:{:.0f}'.format(score_100[i]),(int(b[0]),int(b[1]-25)),cv2.FONT_ITALIC,1,(255,255,0))
                    # else:
                    #     cv2.putText(frame,self.names[results[i]+1],(int(b[0]),int(b[1]-25)),cv2.FONT_ITALIC,1,(255,255,0))
                    
            except Exception:
                pass  # keep streaming even if recognition fails on this frame

            _, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
            
            

    def update(self):  # continuously re-read frames on a background thread
        
        while True:
            (self.grabbed, self.frame) = self.video.read()
            if not self.grabbed or not self.flag:
                break

    def gen(self):
        while True:
            if not self.flag:
                break

            frame = self.get_frame()

            if frame is None:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
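
A worked sketch of the matching arithmetic in get_frame: the squared L2 distance d is mapped to a 0-100 score via score_100 = -80*d + 156, so a score threshold t corresponds to the distance cut-off (t - 156) / (-80), i.e. 0.95 for the default t = 80. Dummy tensors stand in for real embeddings:

import torch

targets = torch.nn.functional.normalize(torch.randn(4, 512), dim=1)  # 4 enrolled identities
probe = torch.nn.functional.normalize(targets[1:2] + 0.01 * torch.randn(1, 512), dim=1)

diff = probe.unsqueeze(-1) - targets.transpose(1, 0).unsqueeze(0)
dist = torch.sum(torch.pow(diff, 2), dim=1)          # probes x identities
minimum, min_idx = torch.min(dist, dim=1)
threshold = 80                                        # default score threshold from __init__
min_idx[minimum > ((threshold - 156) / (-80))] = -1   # distance cut-off = 0.95
score_100 = torch.clamp(minimum * -80 + 156, 0, 100)
print(min_idx.item(), score_100.item())               # 1 and a score near 100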
Code Example #6
    )
    parser.add_argument('--flip',
                        type=lambda v: str(v).lower() in ('true', '1'),
                        default=True,
                        help='whether to flip images for test-time augmentation')
    parser.add_argument('--drop_path_prob',
                        type=float,
                        default=0,
                        help='drop path probability')
    args = parser.parse_args()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    from load_ms1m_lfw_agedb_cfp_dataset import *
    dataloaders, dataset_sizes, dataset = load_glint_data_train_from_lmdb(
        args.batch_size, dataset=args.dataset)
    model = MobileFaceNet(args.feature_dim).to(
        device)  # embedding size is 512 (feature vector)

    from face_model import ArcMarginProduct
    margin = ArcMarginProduct(512, 180855).to(device)
    model.load_state_dict(
        torch.load(
            '/data/face_recognition/Mobilefacenet/arc_center_focal/Iter_555000_model.ckpt'
        )['net_state_dict'])
    # margin.load_state_dict(torch.load('/data/face_recognition/Mobilefacenet/label_smooth_ckpt/Iter_290000_margin.ckpt')['net_state_dict'])

    criterion_focal = FocalLoss().to(device)
    criterion_center = CenterLoss(num_classes=180855,
                                  feat_dim=512,
                                  use_gpu=True).to(device)
    optimizer_centloss = torch.optim.SGD(criterion_center.parameters(), lr=0.5)
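
A sketch of one training step consistent with the objects above, following the common center-loss recipe. loss_weight, optimizer_model, the 'train' dataloader key, and the center-gradient rescale are assumptions drawn from typical CenterLoss usage, not from this listing:

loss_weight = 0.01  # assumed weight on the center-loss term
optimizer_model = torch.optim.SGD(list(model.parameters()) + list(margin.parameters()),
                                  lr=0.1, momentum=0.9, weight_decay=5e-4)

for imgs, labels in dataloaders['train']:
    imgs, labels = imgs.to(device), labels.to(device)
    features = model(imgs)
    logits = margin(features, labels)        # ArcMarginProduct forward takes (features, labels)
    loss = criterion_focal(logits, labels) + loss_weight * criterion_center(features, labels)
    optimizer_model.zero_grad()
    optimizer_centloss.zero_grad()
    loss.backward()
    optimizer_model.step()
    for p in criterion_center.parameters():  # undo loss_weight on the centers' gradient
        p.grad.data *= (1.0 / loss_weight)
    optimizer_centloss.step()
    break  # one illustrative step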
Code Example #7
    parser.add_argument(
        '--method',
        type=str,
        default='l2_distance',
        help='method to evaluate feature similarity: l2_distance or cos_distance'
    )
    parser.add_argument('--flip',
                        type=lambda v: str(v).lower() in ('true', '1'),
                        default=True,
                        help='whether to flip images for test-time augmentation')
    args = parser.parse_args()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    dataloaders, dataset_sizes, dataset = load_data(args.batch_size,
                                                    dataset=args.dataset)
    model = MobileFaceNet(args.feature_dim).to(
        device)  # embedding size is 512 (feature vector)
    print('MobileFaceNet face detection model loaded')
    margin = Arcface(embedding_size=args.feature_dim,
                     classnum=int(dataset['train'].class_nums),
                     s=32.,
                     m=0.5).to(device)

    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer_ft = optim.SGD([{
        'params': model.parameters(),
        'weight_decay': 5e-4
    }, {
        'params': margin.parameters(),
        'weight_decay': 5e-4
    }],
                             lr=0.01,
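
The listing cuts off inside the optimizer_ft definition. Assuming it is completed, a sketch of the forward/backward pass these objects feed follows; the 'train' dataloader key and the (embeddings, labels) signature of Arcface are assumptions:

for imgs, labels in dataloaders['train']:
    imgs, labels = imgs.to(device), labels.to(device)
    embeddings = model(imgs)             # B x feature_dim
    logits = margin(embeddings, labels)  # Arcface adds its angular margin at the true class
    loss = criterion(logits, labels)
    optimizer_ft.zero_grad()
    loss.backward()
    optimizer_ft.step()
    break  # one illustrative step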
Code Example #8
    )
    parser.add_argument('--flip',
                        type=lambda v: str(v).lower() in ('true', '1'),
                        default=True,
                        help='whether to flip images for test-time augmentation')
    parser.add_argument('--drop_path_prob',
                        type=float,
                        default=0,
                        help='drop path probability')
    args = parser.parse_args()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    from load_ms1m_lfw_agedb_cfp_dataset import load_data_train_fix_from_lmdb
    dataloaders, dataset_sizes, dataset = load_data_train_fix_from_lmdb(
        args.batch_size, dataset=args.dataset)
    model = MobileFaceNet(args.feature_dim).to(
        device)  # embedding size is 512 (feature vector)
    # from V100_python.genotypes import PC_DARTS_image
    # model = Network(48, 85742, 20,False, PC_DARTS_image).to(device)
    # print(model)

    # model.load_state_dict(checkpoint['net_state_dict'])
    # print(checkpoint)
    print('MobileFaceNet face detection model loaded')
    # margin = Arcface(embedding_size=args.feature_dim, classnum=int(dataset['train'].class_nums),  s=32., m=0.5).to(device)
    from face_model import ArcMarginProduct
    margin = ArcMarginProduct(512, 85742).to(device)
    # margin = Softmax(args.feature_dim,85742).to(device)
    # model.load_state_dict(checkpoint['net_state_dict'])
    checkpoint = torch.load(
        '/data/face_recognition/Mobilefacenet/saving_Faces_emore_ckpt_arcface_64_randombrightness/Iter_125000_model.ckpt'
    )
Code Example #9
    
    parser = argparse.ArgumentParser(description='Face_Detection_Training')
    parser.add_argument('--dataset', type=str, default='Faces_emore', help='Training dataset: CASIA, Faces_emore')
    parser.add_argument('--feature_dim', type=int, default=512, help='the feature dimension output')
    parser.add_argument('--batch_size', type=int, default=200, help='batch size for training and evaluation')
    parser.add_argument('--epoch', type=int, default=20, help='number of epochs for training')
    parser.add_argument('--method', type=str, default='l2_distance',
                            help='method to evaluate feature similarity: l2_distance or cos_distance')
    parser.add_argument('--flip', type=lambda v: str(v).lower() in ('true', '1'), default=True,
                        help='whether to flip images for test-time augmentation')
    parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
    args = parser.parse_args()
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    from load_ms1m_lfw_agedb_cfp_dataset import load_data_train_fix_from_lmdb  
    dataloaders , dataset_sizes, dataset = load_data_train_fix_from_lmdb(args.batch_size, dataset = args.dataset)
    model = MobileFaceNet(args.feature_dim).to(device)  # embedding size is 512 (feature vector)
    
    
    from face_model import ArcMarginProduct
    margin = ArcMarginProduct(512,85742).to(device)
    model.load_state_dict(torch.load('/data/face_recognition/Mobilefacenet/saving_Faces_emore_ckpt/Iter_060000_model.ckpt')['net_state_dict'])
    margin.load_state_dict(torch.load('/data/face_recognition/Mobilefacenet/saving_Faces_emore_ckpt/Iter_060000_margin.ckpt')['net_state_dict'])

    if torch.cuda.device_count()>1:
        model = torch.nn.DataParallel(model)
        margin = torch.nn.DataParallel(margin)
        checkpoint=torch.load('./saving_Faces_emore_ckpt/Iter_410000_model.ckpt')
        # /data/face_recognition/Mobilefacenet/saving_Faces_emore_ckpt/Iter_410000_margin.ckpt
        model.load_state_dict(checkpoint['net_state_dict'])
        margin.load_state_dict(torch.load('./saving_Faces_emore_ckpt/Iter_410000_margin.ckpt')['net_state_dict'])
    # else:
Code Example #10
                   'AgeDB30': DataLoaderX(dataset_AgeDB30, batch_size=batch_size, shuffle=False, num_workers=2)}
    
    dataset = {'LFW': dataset_LFW,'CFP_FP': dataset_CFP_FP, 'AgeDB30': dataset_AgeDB30,'privacy':dataset_privacy}
    
    dataset_sizes = {'LFW': len(dataset_LFW),'privacy':len(dataset_privacy),
                     'CFP_FP': len(dataset_CFP_FP), 'AgeDB30': len(dataset_AgeDB30)}
    
    print('training and validation data loaded')
    
    return dataloaders, dataset_sizes, dataset

if __name__ == '__main__':
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    from face_model import MobileFaceNet, l2_norm
    dataloaders , dataset_sizes, dataset = load_test_data(50)
    model = MobileFaceNet(512).to(device)
    state_dict = torch.load('arc_center_focal/Iter_555000_model.ckpt')['net_state_dict']  # forward slash: the original backslash path is Windows-only
    # from collections import OrderedDict
    # new_state_dict = OrderedDict()
    # for k, v in state_dict.items():
    #     name = k[7:] # remove `module.`
    #     new_state_dict[name] = v
    # load params
    model.load_state_dict(state_dict)
    # print(state_dict)
    # model.drop_path_prob = 0
    model.eval()
    
    for phase in ['LFW', 'CFP_FP', 'AgeDB30','privacy']:                 
        featureLs, featureRs = getFeature(model, dataloaders[phase], device, flip = True)
        # ACCs, threshold = evaluation_10_fold(featureLs, featureRs, dataset[phase], method = 'l2_distance')
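
A minimal sketch of what the commented-out evaluation computes at a single threshold. featureLs and featureRs are assumed to be numpy arrays from getFeature, and flags the same/different pair labels (1 same, -1 different) that evaluation_10_fold reads from dataset[phase]; both are assumptions about the repository's helpers:

import numpy as np

def accuracy_at_threshold(featureLs, featureRs, flags, threshold):
    featureLs = featureLs / np.linalg.norm(featureLs, axis=1, keepdims=True)
    featureRs = featureRs / np.linalg.norm(featureRs, axis=1, keepdims=True)
    dist = np.sum((featureLs - featureRs) ** 2, axis=1)  # squared L2 per pair
    predictions = dist < threshold                       # small distance -> same identity
    return np.mean(predictions == (flags == 1))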
Code Example #11
    )
    parser.add_argument('--flip',
                        type=lambda v: str(v).lower() in ('true', '1'),
                        default=True,
                        help='whether to flip images for test-time augmentation')
    parser.add_argument('--drop_path_prob',
                        type=float,
                        default=0,
                        help='drop path probability')
    args = parser.parse_args()

    cpu_device = torch.device('cpu')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataloaders, dataset_sizes, dataset = load_glint_data_train_from_lmdb(
        args.batch_size, dataset=args.dataset)
    model = MobileFaceNet(args.feature_dim)
    model.load_state_dict(
        torch.load(
            '/data/face_recognition/Mobilefacenet/acr_center_focal_0.03/Iter_455000_model.ckpt',
            map_location='cpu')['net_state_dict'])
    model = model.to(device)

    margin = ArcMarginProduct(512, 180855).to(cpu_device)
    margin.load_state_dict(
        torch.load(
            '/data/face_recognition/Mobilefacenet/acr_center_focal_0.03/Iter_glint_margin.ckpt',
            map_location='cpu')['net_state_dict'])
    margin = margin.to(device)
    criterion_focal = FocalLoss().to(device)
    criterion_center = CenterLoss(num_classes=180855,
                                  feat_dim=512,
Code Example #12
File: cam_demo.py Project: tanpo-1776017488/-
        '--mini_face',
        dest='mini_face',
        help=
        "Minimum face size to be detected. Decrease to increase accuracy; increase to increase speed.",
        default=20,
        type=int)
    parser.add_argument('-e',
                        '--embedding',
                        type=int,
                        default=512,
                        help='embedding size')
    args = parser.parse_args()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    #prepare pretrained model
    detect_model = MobileFaceNet(args.embedding).to(
        device)  # embedding size is 512 (feature vector)
    check_point = torch.load('Weights/model_final.pth',
                             map_location=lambda storage, loc: storage)
    detect_model.load_state_dict(check_point['model_state_dict'])
    print('MobileFaceNet face detection model generated')
    detect_model.eval()

    #face bank update
    if args.update:
        targets, names = prepare_facebank(detect_model,
                                          path='facebank',
                                          tta=args.tta)
        print('facebank updated')
    else:
        targets, names = load_facebank(path='facebank')
        print('facebank loaded')
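
A compact sketch of the recognition loop this setup typically drives. create_mtcnn_net and Face_alignment are the helpers referenced in the views.py listing above; their use here is an assumption about the rest of cam_demo.py:

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    bboxes, landmarks = create_mtcnn_net(frame, args.mini_face, device,
                                         p_model_path='MTCNN/weights/pnet_Weights',
                                         r_model_path='MTCNN/weights/rnet_Weights',
                                         o_model_path='MTCNN/weights/onet_Weights')
    faces = Face_alignment(frame, default_square=True, landmarks=landmarks)
    # embed each aligned face with detect_model and match against targets,
    # as in VideoCamera.get_frame above
    cv2.imshow('demo', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()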