def genFaceEmbedding(self):
    """Compute a face embedding for every image in the dataset and pickle them.

    Walks ``self.args.dataset`` for images (the person name is taken from the
    parent directory of each image path), runs each image through the
    insightface ``FaceModel`` embedder, and writes a dict
    ``{"embeddings": [...], "names": [...]}`` to ``self.args.embeddings``.
    """
    # Grab the paths to the input images in our dataset
    print("[INFO] quantifying faces...")
    imagePaths = list(paths.list_images(self.args.dataset))

    # Initialize the faces embedder
    embedding_model = face_model.FaceModel(
        self.image_size, self.model, self.threshold, self.det)

    # Extracted facial embeddings and the corresponding people names
    knownEmbeddings = []
    knownNames = []

    for (i, imagePath) in enumerate(imagePaths):
        print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
        # Person name = name of the directory the image file lives in
        name = imagePath.split(os.path.sep)[-2]

        # Load the image; cv2.imread returns None for unreadable/corrupt
        # files, which would crash cvtColor — skip those instead
        image = cv2.imread(imagePath)
        if image is None:
            print("[WARN] could not read {}, skipping".format(imagePath))
            continue

        # Convert BGR (OpenCV default) -> RGB, then HWC -> CHW layout
        # expected by the embedding model
        nimg = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        nimg = np.transpose(nimg, (2, 0, 1))

        # Get the face embedding vector
        face_embedding = embedding_model.get_feature(nimg)

        # Record the name and its corresponding embedding
        knownNames.append(name)
        knownEmbeddings.append(face_embedding)

    # Number of faces actually embedded (unreadable files were skipped)
    total = len(knownNames)
    print(total, " faces embedded")

    # Persist embeddings + names; the context manager guarantees the file
    # is closed even if serialization fails
    data = {"embeddings": knownEmbeddings, "names": knownNames}
    with open(self.args.embeddings, "wb") as f:
        pickle.dump(data, f)
def __init__(self):
    """Load every model and artifact needed for face recognition/tracking.

    Sets up the insightface embedder, MTCNN detector, Deep-SORT appearance
    encoder, the pickled embeddings + label encoder, and the trained
    classifier. Any failure is caught and printed (constructor never raises).
    """
    try:
        # insightface embedding-model parameters
        self.image_size = '112,112'
        self.model = "./insightface/models/model-y1-test2/model,0"
        self.threshold = 1.24
        self.det = 0

        # Appearance-feature encoder used by the tracker
        self.model_filename = 'C:/Sasi/sasi/DLCVNLP/Face Recognition/FaceRecog1/src/model_data/mars-small128.pb'
        self.encoder = gdet.create_box_encoder(self.model_filename, batch_size=1)

        # Initialize detector
        self.detector = MTCNN()

        # Initialize faces embedding model
        self.embedding_model = face_model.FaceModel(
            self.image_size, self.model, self.threshold, self.det)

        self.embeddings = "./faceEmbeddingModels/embeddings.pickle"
        self.le = "./faceEmbeddingModels/le.pickle"

        # Load embeddings and labels; with-blocks close the files (the
        # original open(...).read() leaked both handles)
        with open(self.embeddings, "rb") as f:
            self.data = pickle.load(f)
        with open(self.le, "rb") as f:
            # NOTE: rebinds self.le from a path string to the label encoder
            self.le = pickle.load(f)

        # Rebinds self.embeddings from a path string to the embedding matrix
        self.embeddings = np.array(self.data['embeddings'])
        self.labels = self.le.fit_transform(self.data['names'])

        # Load the classifier model (rebinds self.model from the insightface
        # model path to the trained classifier)
        self.model = load_model(ConfigurationsPOJO.clssfr_ModelPath)

        # Recognition thresholds
        self.cosine_threshold = 0.8
        self.proba_threshold = 0.85
        self.comparing_num = 5

        # Tracker state
        self.trackers = []
        self.texts = []
    except Exception as e:
        # NOTE(review): broad catch kept for behavior compatibility, but it
        # reduces any init failure to a one-line print — consider re-raising
        print(e)