def __init__(self, prefix, epoch, ctx_id=0):
    # print('loading', prefix, epoch)
    # ctx = mx.gpu(ctx_id)
    # sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    # all_layers = sym.get_internals()
    # sym = all_layers['fc1_output']
    image_size = (112, 112)
    self.image_size = image_size
    # model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    # model.bind(for_training=False, data_shapes=[('data', (2, 3, image_size[0], image_size[1]))])
    # model.set_params(arg_params, aux_params)
    # self.model = model

    # 5-point reference landmarks (left eye, right eye, nose tip, left and
    # right mouth corners) of the ArcFace 96x112 template; shifting x by 8
    # pixels centers them in the 112x112 crop.
    src = np.array([[30.2946, 51.6963],
                    [65.5318, 51.5014],
                    [48.0252, 71.7366],
                    [33.5493, 92.3655],
                    [62.7299, 92.2041]], dtype=np.float32)
    src[:, 0] += 8.0
    self.src = src

    # SCRFD face detector (320x320 ONNX model) plus a recognition backbone,
    # wrapped by the face_common binding.
    self.face_recognizer = face_common.FaceRecognizer(
        True,
        "/mnt/hdd/PycharmProjects/insightface/detection/scrfd/scrfd_34g_n1/scrfd_34g_shape320x320.onnx",
        320, 0.01, 0.4, True, "models/backbone.onnx", 0)
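    # --- Hypothetical helper, not part of the original class: align a detected
    # --- face to the 112x112 template using the 5-point reference `self.src`.
    # --- Assumes `landmark5` is a (5, 2) float array of detected landmarks and
    # --- that scikit-image is available; this is only a sketch.
    def align(self, img, landmark5):
        import cv2
        from skimage import transform as trans
        tform = trans.SimilarityTransform()
        # similarity transform that maps the detected points onto the template
        tform.estimate(np.asarray(landmark5, dtype=np.float32), self.src)
        M = tform.params[0:2, :]  # 2x3 affine matrix for cv2.warpAffine
        return cv2.warpAffine(img, M, self.image_size, borderValue=0.0)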
    nme_loss = torch.sqrt(preloss) / (inter_occular_distance * num_face_landmark)
    return nme_loss


data_path = "/mnt/hdd/IJB"
target = 'IJBC'
img_path = os.path.join(data_path, './%s/loose_crop' % target)
img_list_path = os.path.join(
    data_path, './%s/meta/%s_name_5pts_score.txt' % (target, target.lower()))
img_list = open(img_list_path)
files = img_list.readlines()
print('files:', len(files))

face_recognizer = face_common.FaceRecognizer(
    True, "model/retinaface_resnet50_480x480.onnx", 480, 0.01, 0.4, False, "", 0)

result = 0
for img_index, each_line in enumerate(files):
    if img_index % 500 == 0:
        print('processing', img_index)
    name_lmk_score = each_line.strip().split(' ')
    img_name = os.path.join(img_path, name_lmk_score[0])
    img = cv2.imread(img_name)
    # ground-truth 5-point landmarks from the meta file: first field is the
    # image name, last field is a score, the 10 values in between are x,y pairs
    target_lmk = np.array([float(x) for x in name_lmk_score[1:-1]],
                          dtype=np.float32)
    target_lmk = target_lmk.reshape((5, 2))
    detection = face_recognizer.Detect(img, False, False)
    landmarks = detection[0].landmarks
    predict_lanmark = []
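    # --- Sketch of how this loop might continue (inferred, not taken from the
    # --- source): turn the detector's landmark output into a (5, 2) array,
    # --- accumulate the per-image NME, and report the mean at the end.
    # --- ASSUMPTIONS: `.landmarks` is a flat [x1, y1, ..., x5, y5] list, and
    # --- the function whose tail appears above is NMELoss as defined in the
    # --- companion script.
    for k in range(5):
        predict_lanmark.append([landmarks[2 * k], landmarks[2 * k + 1]])
    predict_lanmark = np.array(predict_lanmark, dtype=np.float32)
    result += NMELoss(predict_lanmark, target_lmk).item()

print('mean NME over %d images:' % len(files), result / len(files))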
from __future__ import print_function
import os
import cv2
import torch
import face_common
import numpy as np

save_folder = "./prediction/"
dataset_folder = "../widerface_val/images/"

if __name__ == '__main__':
    face_recognizer = face_common.FaceRecognizer(
        True, "model/fd_mobilenet_origin.onnx", 480, 0.02, False, "")
    torch.set_grad_enabled(False)
    testset_folder = dataset_folder
    testset_list = dataset_folder[:-7] + "wider_val.txt"
    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split()
    num_images = len(test_dataset)
    for i, img_name in enumerate(test_dataset):
        ############################# Add face detection here#######################################
        image_path = testset_folder + img_name
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        results = face_recognizer.Detect(img, False, False)
        dets = []
        for result in results:
            dets.append(
                [result.x1, result.y1, result.x2, result.y2, result.confident])
        dets = np.array(dets)
        ############################################################################################
from __future__ import print_function
import os
import cv2
import face_common
import numpy as np

save_folder = "./prediction/"
dataset_folder = "../widerface_val/images/"

if __name__ == '__main__':
    face_recognizer = face_common.FaceRecognizer(
        True, "model/yolov5s-face640x640.onnx", 640, 0.02, 0.5, False, "")
    testset_folder = dataset_folder
    testset_list = dataset_folder[:-7] + "wider_val.txt"
    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split()
    num_images = len(test_dataset)
    for i, img_name in enumerate(test_dataset):
        ############################# Add face detection here#######################################
        image_path = testset_folder + img_name
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        results = face_recognizer.Detect(img, False, False)
        dets = []
        for result in results:
            dets.append(
                [result.x1, result.y1, result.x2, result.y2, result.confident])
        dets = np.array(dets)
        ############################################################################################
        save_name = save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
        if not os.path.isdir(dirname):
from __future__ import print_function
import os
import cv2
import face_common
import numpy as np

save_folder = "./prediction/"
dataset_folder = "../widerface_val/images/"

if __name__ == '__main__':
    face_recognizer = face_common.FaceRecognizer(
        True, "model/scrfd_10g_bnkps.onnx", 640, 0.02, False, "")
    testset_folder = dataset_folder
    testset_list = dataset_folder[:-7] + "wider_val.txt"
    with open(testset_list, 'r') as fr:
        test_dataset = fr.read().split()
    num_images = len(test_dataset)
    for i, img_name in enumerate(test_dataset):
        ############################# Add face detection here#######################################
        image_path = testset_folder + img_name
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        h, w = img.shape[:2]
        results = face_recognizer.Detect(img, False, False)
        dets = []
        for result in results:
            dets.append(
                [result.x1, result.y1, result.x2, result.y2, result.confident])
        dets = np.array(dets)
        ############################################################################################
        save_name = save_folder + img_name[:-4] + ".txt"
        dirname = os.path.dirname(save_name)
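        # --- Sketch of the truncated save step (inferred, not taken from the
        # --- source): write one text file per image in the usual WIDER FACE
        # --- prediction format: file stem, box count, then "x y w h score".
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(save_name, "w") as fd:
            fd.write(os.path.basename(save_name)[:-4] + "\n")
            fd.write(str(len(dets)) + "\n")
            for det in dets:
                x1, y1, x2, y2, score = det
                fd.write("%d %d %d %d %.3f\n" % (x1, y1, x2 - x1, y2 - y1, score))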
def NMELoss(predicted_landmark, target_landmark):
    # predicted_landmark and target_landmark are numpy arrays of shape [5, 2]
    num_face_landmark = 5
    leye_nouse_vec = torch.from_numpy(target_landmark[0] - target_landmark[2])
    reye_nouse_vec = torch.from_numpy(target_landmark[1] - target_landmark[2])
    inter_occular_distance = LA.norm(leye_nouse_vec) + LA.norm(reye_nouse_vec)
    loss = nn.MSELoss(reduction="sum")
    preloss = loss(torch.from_numpy(predicted_landmark),
                   torch.from_numpy(target_landmark))
    nme_loss = torch.sqrt(preloss) / (inter_occular_distance * num_face_landmark)
    return nme_loss


# RetinaFace detector (320x320 ONNX export) via the face_common binding
face_recognizer = face_common.FaceRecognizer(
    True,
    "/mnt/hdd/PycharmProjects/Pytorch_Retinaface/weights/model_origin.onnx",
    320, 0.01, 0.4, False, "", 0)

result = 0
number_faces = 0
for i in range(len(dataset)):
    img, target = dataset[i]
    for face in target:
        face = face.astype(np.int32)
        area = np.abs(face[2] - face[0]) * np.abs(face[3] - face[1])
        # skip faces without landmark annotations (flag == -1) or smaller
        # than 112x112 pixels (112 * 112 = 12544)
        if face[-1] != -1 and area > 12544:
            number_faces = number_faces + 1
            # ground-truth landmarks: 10 values (5 points x 2 coords) at index 4..13
            target_lmk = face[4:14].astype(np.float32)
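# --- Standalone sanity check for NMELoss (a sketch, not part of the original
# --- script). The imports below mirror the names used inside NMELoss; the
# --- aliases nn = torch.nn and LA = numpy.linalg are assumptions about the
# --- file header, which this excerpt does not show.
import numpy as np
import torch
import torch.nn as nn
from numpy import linalg as LA

gt_lmk = np.array([[38.3, 51.7], [73.5, 51.5], [56.0, 71.7],
                   [41.5, 92.4], [70.7, 92.2]], dtype=np.float32)
pred_lmk = gt_lmk + 1.0  # shift every point by one pixel
print('NME for a 1-pixel offset:', NMELoss(pred_lmk, gt_lmk).item())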