Code Example #1
File: views.py  Project: tanpo-1776017488/-
    def __init__(self, username='******', threshold=80, update_val=True, tta=False,
                 c=True, scale=0.3, min_face=20, embedding=512,
                 bank_path='media/account/facebank',
                 model_path='Weights/model_final.pth'):
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        # background thread that keeps grabbing frames via update()
        # (note: start() is not called inside __init__)
        self.thread = threading.Thread(target=self.update, args=())
        self.flag = True
        # recognition parameters
        self.threshold = threshold
        self.tta = tta
        self.score = c
        self.scale = scale
        self.min_face = min_face
        self.embedding = embedding
        # per-user face bank path (the bank_path argument is not used here)
        self.facebank = 'media/account/{}/facebank'.format(username)
        self.model_path = model_path
        self.up = update_val
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print('using ', self.device)
        # MTCNN face detector built from the P-Net / R-Net / O-Net weights
        self.face_detector = mtcnn_custom(device=self.device,
                                          p_model_path='MTCNN/weights/pnet_Weights',
                                          r_model_path='MTCNN/weights/rnet_Weights',
                                          o_model_path='MTCNN/weights/onet_Weights')
        print('face detector created...')

        # prepare the pretrained recognition model
        self.detect_model = MobileFaceNet(self.embedding).to(self.device)  # embedding size is 512 (feature vector)
        # self.check_point=torch.load('Weights/model_final_t.pth',map_location=lambda storage, loc: storage)
        # self.detect_model.load_state_dict(self.check_point['model_state_dict'])
        self.detect_model.load_state_dict(
            torch.load(self.model_path, map_location=lambda storage, loc: storage))
        print('MobileFaceNet face detection model generated')
        self.detect_model.eval()

        # face bank update
        if self.up:
            self.targets, self.names = prepare_facebank(self.detect_model, path=self.facebank, tta=self.tta)
            print('facebank updated')
        else:
            self.targets, self.names = load_facebank(path=self.facebank)
            print('facebank loaded')
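
The __init__ above creates self.thread with target=self.update and keeps the most recent frame in self.frame, but the rest of the class is not shown. The following is a minimal sketch of the companion methods such a threaded-capture class typically defines; the method names (update, get_frame, stop) and the JPEG encoding for an HTTP multipart stream are assumptions, not taken from the original project.

    def update(self):
        # keep grabbing frames in the background so get_frame() always sees the latest one
        while self.flag:
            (self.grabbed, self.frame) = self.video.read()

    def get_frame(self):
        # encode the most recent frame as JPEG bytes for an HTTP multipart stream
        ret, jpeg = cv2.imencode('.jpg', self.frame)
        return jpeg.tobytes()

    def stop(self):
        # hypothetical teardown: end the grab loop and release the camera
        self.flag = False
        self.video.release()
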
Code Example #2
import argparse
import io
import json
import os
import sys

import torch
from torchvision import models
from PIL import Image
from flask import Flask, jsonify, request
from flask_cors import CORS

# MobileFaceNet, load_facebank and prepare_facebank come from the project's own
# modules; their import lines are not shown in the original snippet.

# pick the GPU if available and load the pretrained MobileFaceNet embedding model
device_0 = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
detect_model = MobileFaceNet(512).to(device_0)  # 512-d feature embeddings
detect_model.load_state_dict(
    torch.load('Weights/MobileFace_Net',
               map_location=lambda storage, loc: storage))
detect_model.eval()
target, name = load_facebank(path='facebank')
parser = argparse.ArgumentParser()
parser.add_argument('--miniface', default=10, type=int)
parser.add_argument('--scale', default=2, type=int)
# argparse's type=bool treats any non-empty string (including "False") as True,
# so expose --update as a plain flag instead
parser.add_argument('--update', action='store_true')
args = parser.parse_args()
if args.update:
    targets, names = prepare_facebank(detect_model, path='facebank')
    print('facebank updated')
else:
    targets, names = load_facebank(path='facebank')
    print('facebank loaded')
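

# ---------------------------------------------------------------------------
# Sketch (not part of the original snippet): a Flask route of the kind this
# script presumably exposes, wiring the embedding model and the face bank to
# an HTTP endpoint.  The route name, the 112x112 input size, the [-1, 1]
# normalisation and the L2-distance matching are all assumptions, as is the
# shape of `targets` (an (N, 512) torch tensor) and the layout of `names`.
# ---------------------------------------------------------------------------
import torchvision.transforms.functional as TF

app = Flask(__name__)
CORS(app)


@app.route('/recognize', methods=['POST'])
def recognize():
    # read the uploaded image from the multipart form field "file"
    img = Image.open(io.BytesIO(request.files['file'].read())).convert('RGB')
    img = img.resize((112, 112))

    # to a CxHxW tensor in [0, 1], then scale to roughly (pixel - 127.5) / 128
    x = ((TF.to_tensor(img) - 0.5) / 0.5).unsqueeze(0).to(device_0)

    with torch.no_grad():
        emb = detect_model(x)  # (1, 512) feature vector

    # match against the face bank by L2 distance; smaller means more similar
    dist = torch.norm(emb - targets.to(device_0), dim=1)
    idx = int(torch.argmin(dist))
    # the index-to-name mapping may need an offset depending on how
    # prepare_facebank / load_facebank build `names`
    return jsonify({'name': str(names[idx]), 'distance': float(dist[idx])})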


def mod_crop(image, scale=2):
    # crop the image so its height and width are exact multiples of `scale`
    if len(image.shape) == 3: