import os
import time
import json
from os.path import exists, join

import cv2
import imutils
import numpy as np

# AntiSpoofPredict, CropImage, parse_model_name and FaceModel are the
# project's own helpers; import them from wherever they live in this repo.


def liveness_detector(frame):
    # NOTE: model_test is assumed to be a module-level AntiSpoofPredict
    # instance created elsewhere (e.g. model_test = AntiSpoofPredict(device_id)).
    image_cropper = CropImage()
    model_dir = './resources/liveness_model'
    image_bbox = model_test.get_bbox(frame)
    # get_bbox returns [0, 0, 1, 1] when no face is detected.
    if image_bbox[0] == 0 and image_bbox[1] == 0 and image_bbox[2] == 1 and image_bbox[3] == 1:
        return False
    prediction = np.zeros((1, 3))
    # Sum the predictions of every model in model_dir.
    for model_name in os.listdir(model_dir):
        h_input, w_input, model_type, scale = parse_model_name(model_name)
        param = {
            "org_img": frame,
            "bbox": image_bbox,
            "scale": scale,
            "out_w": w_input,
            "out_h": h_input,
            "crop": True,
        }
        if scale is None:
            param["crop"] = False
        img = image_cropper.crop(**param)
        prediction += model_test.predict(img, os.path.join(model_dir, model_name))
    # label: 1 means real face, anything else is fake; value is the summed score.
    label = np.argmax(prediction)
    value = prediction[0][label]
    return label == 1 and value > 0.7
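# Minimal usage sketch for liveness_detector(), assuming the module-level
# model_test described above exists and a webcam is available; the capture
# index and the printed messages are illustrative only.
def demo_liveness_on_webcam_frame():
    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    cap.release()
    if not ok:
        print("can't read frame from webcam")
        return
    print("live face" if liveness_detector(frame) else "no live face or spoof")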
def preprocessing(model_dir, device_id, num_classes, src_dir, dst_dir, threshold):
    face_model = FaceModel()
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()
    onlyfiles = [
        os.path.join(path, name)
        for path, subdirs, files in os.walk(src_dir)
        for name in files
    ]
    for file_path in onlyfiles:
        file_name = os.path.basename(file_path)
        image = cv2.imread(file_path)
        image_bbox = face_model.get_bbox(image)
        if image_bbox == [0, 0, 1, 1]:
            dst_path_image = join(dst_dir, "not_detect_face")
            if not exists(dst_path_image):
                os.makedirs(dst_path_image)
            cv2.imwrite(join(dst_path_image, file_name), image)
        else:
            image_cropped = []
            prediction = np.zeros((1, num_classes))
            count_model = 0
            for model_name in os.listdir(model_dir):
                h_input, w_input, model_type, scale = parse_model_name(model_name)
                param = {
                    "org_img": image,
                    "bbox": image_bbox,
                    "scale": scale,
                    "out_w": w_input,
                    "out_h": h_input,
                    "crop": True,
                }
                if scale is None:
                    param["crop"] = False
                img = image_cropper.crop(**param)
                image_cropped.append({"scale": str(scale), "image": img})
                if threshold > 0:
                    prediction += model_test.predict(img, os.path.join(model_dir, model_name))
                    count_model = count_model + 1
            directory = dst_dir
            if threshold > 0:
                label = np.argmax(prediction)
                value = prediction[0][label] / count_model
                directory = join(dst_dir, str(label))
            if (threshold > 0 and value >= threshold) or (threshold == 0):
                for cropped in image_cropped:
                    dst_path_image = join(directory, cropped["scale"])
                    if not exists(dst_path_image):
                        os.makedirs(dst_path_image)
                    cv2.imwrite(join(dst_path_image, file_name), cropped["image"])
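# Every loop above relies on parse_model_name() to decode a checkpoint file
# name such as "2.7_80x80_MiniFASNetV2.pth" into (input height, input width,
# model type, crop scale). The repo ships its own implementation; the sketch
# below only illustrates the assumed naming convention (treating a leading
# "org" as "no crop / scale is None" is an assumption).
def parse_model_name_sketch(model_name):
    parts = model_name.split('_')
    scale = None if parts[0] == "org" else float(parts[0])
    h_input, w_input = (int(v) for v in parts[1].split('x'))
    model_type = parts[-1].split('.pth')[0]
    return h_input, w_input, model_type, scale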
def test(image_name, model_dir, device_id):
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()
    # SAMPLE_IMAGE_PATH is assumed to be a module-level constant pointing at
    # the folder that holds the sample images.
    image = cv2.imread(SAMPLE_IMAGE_PATH + image_name)
    result = check_image(image)
    if result is False:
        return
    image_bbox = model_test.get_bbox(image)
    prediction = np.zeros((1, 3))
    test_speed = 0
    # Sum the predictions of every model in model_dir.
    for model_name in os.listdir(model_dir):
        h_input, w_input, model_type, scale = parse_model_name(model_name)
        param = {
            "org_img": image,
            "bbox": image_bbox,
            "scale": scale,
            "out_w": w_input,
            "out_h": h_input,
            "crop": True,
        }
        if scale is None:
            param["crop"] = False
        img = image_cropper.crop(**param)
        start = time.time()
        prediction += model_test.predict(img, os.path.join(model_dir, model_name))
        test_speed += time.time() - start
    # Draw the result of the prediction.
    label = np.argmax(prediction)
    value = prediction[0][label] / 2  # average, assuming two models in model_dir
    if label == 1:
        print("Image '{}' is Real Face. Score: {:.2f}.".format(image_name, value))
        result_text = "RealFace Score: {:.2f}".format(value)
        color = (255, 0, 0)
    else:
        print("Image '{}' is Fake Face. Score: {:.2f}.".format(image_name, value))
        result_text = "FakeFace Score: {:.2f}".format(value)
        color = (0, 0, 255)
    print("Prediction cost {:.2f} s".format(test_speed))
    cv2.rectangle(
        image,
        (image_bbox[0], image_bbox[1]),
        (image_bbox[0] + image_bbox[2], image_bbox[1] + image_bbox[3]),
        color, 2)
    cv2.putText(
        image, result_text,
        (image_bbox[0], image_bbox[1] - 5),
        cv2.FONT_HERSHEY_COMPLEX, 0.5 * image.shape[0] / 1024, color)
    format_ = os.path.splitext(image_name)[-1]
    result_image_name = image_name.replace(format_, "_result" + format_)
    cv2.imwrite(SAMPLE_IMAGE_PATH + result_image_name, image)
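# check_image() above is assumed to be a small input validator. A sketch of
# the usual check (reject anything that is not a 3:4 width-to-height image,
# as the anti-spoofing demo expects) is given below; adjust it to your needs.
def check_image_sketch(image):
    height, width, _ = image.shape
    if width / height != 3 / 4:
        print("Image is not appropriate!\nWidth/Height should be 3/4.")
        return False
    return True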
def test(image_name, model_dir, device_id):
    # Despite its name, image_name is expected to already be a decoded frame
    # (a numpy array). Uncomment the line below to pass a file path instead.
    # image_name = cv2.imread(image_name)
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()
    image_bbox = model_test.get_bbox(image_name)
    prediction = np.zeros((1, 3))
    test_speed = 0
    for model_name in os.listdir(model_dir):
        h_input, w_input, model_type, scale = parse_model_name(model_name)
        param = {
            "org_img": image_name,
            "bbox": image_bbox,
            "scale": scale,
            "out_w": w_input,
            "out_h": h_input,
            "crop": True,
        }
        if scale is None:
            param["crop"] = False
        img = image_cropper.crop(**param)
        start = time.time()
        prediction += model_test.predict(img, os.path.join(model_dir, model_name))
        test_speed += time.time() - start
    # Report the result of the prediction.
    label = np.argmax(prediction)
    value = prediction[0][label] / 2  # average, assuming two models in model_dir
    if label == 1 and value >= 0.6:
        print("Real Face.")
        result_text = "RealFace"
        color = (255, 0, 0)
    else:
        print("Fake Face.")
        result_text = "FakeFace"
        color = (0, 0, 255)
    print("Prediction time {:.2f} s".format(test_speed))
if __name__ == "__main__":
    # device_id, img_heights and folder_int are expected to be defined above
    # (e.g. parsed from command-line arguments).
    if device_id >= 0:
        # prepare environments
        ctx, queue, mf, prg = prepare_environment()
    else:
        ctx, queue, mf, prg = None, None, None, None
    img_heights = list(json.loads(img_heights))

    # load model
    # face_model = mytest.face_boxes_model()
    # face_model.load_face_model()
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()

    file_images = os.listdir(folder_int)
    results = []
    # read images
    for f in file_images:
        link_image = os.path.join(folder_int, f)
        img = cv2.imread(link_image)
        if img is None:
            print("can't read image:", link_image)
            results.append([link_image, -1])
            continue
        # bbox can be None if detection fails
        # if bbox is not None:
def webcam(model_dir, device_id):
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()
    color = (255, 0, 0)
    cap = cv2.VideoCapture(0)

    print("[INFO] loading model...")
    # OpenCV's SSD face detector (Caffe) is used to find faces in each frame.
    net = cv2.dnn.readNetFromCaffe('deploy.prototxt.txt',
                                   'res10_300x300_ssd_iter_140000.caffemodel')
    print("[INFO] starting video stream...")
    max_num_faces = 20

    while True:
        ret, frame = cap.read()
        if not ret:  # cap.read() returns False when no frame is available
            break
        frame = imutils.resize(frame, width=640)
        # Take the size of the resized frame so the detection boxes match it.
        (h, w) = frame.shape[:2]

        ### Use mobileFacenet
        # image_bbox = model_test.get_bbox(frame)
        # print(image_bbox)

        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,
                                     (300, 300), (104.0, 177.0, 123.0))
        net.setInput(blob)
        detections = net.forward()

        for i in range(0, max_num_faces):
            confidence = detections[0, 0, i, 2]
            if confidence < 0.5:
                continue
            prediction = np.zeros((1, 3))
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            image_bbox = (startX, startY, endX - startX, endY - startY)

            start_time = time.time()
            for model_name in os.listdir(model_dir):
                h_input, w_input, model_type, scale = parse_model_name(model_name)
                param = {
                    "org_img": frame,
                    "bbox": image_bbox,
                    "scale": scale,
                    "out_w": w_input,
                    "out_h": h_input,
                    "crop": True,
                }
                if scale is None:
                    param["crop"] = False
                img = image_cropper.crop(**param)
                prediction += model_test.predict(img, os.path.join(model_dir, model_name))

            label = np.argmax(prediction)
            value = prediction[0][label] / 2  # average, assuming two models in model_dir
            if label == 1:
                result_text = "RealFace Score: {:.2f}".format(value)
                color = (255, 0, 0)
            else:
                result_text = "FakeFace Score: {:.2f}".format(value)
                color = (0, 0, 255)
            end_time = time.time()

            cv2.rectangle(
                frame,
                (image_bbox[0], image_bbox[1]),
                (image_bbox[0] + image_bbox[2], image_bbox[1] + image_bbox[3]),
                color, 2)
            cv2.putText(frame, result_text,
                        (image_bbox[0], image_bbox[1] - 5),
                        cv2.FONT_HERSHEY_COMPLEX, 1 * frame.shape[0] / 1024, color)

            fps = 1 / (end_time - start_time)
            cv2.putText(frame, str(fps), (7, 70), cv2.FONT_HERSHEY_SIMPLEX,
                        3, (100, 255, 0), 3, cv2.LINE_AA)

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
def test(model_dir, device_id, num_classes, src_dir, dst_dir, draw_bbox):
    face_model = FaceModel()
    model_test = AntiSpoofPredict(device_id)
    image_cropper = CropImage()
    onlyfiles = [
        os.path.join(path, name)
        for path, subdirs, files in os.walk(src_dir)
        for name in files
    ]
    for file_path in onlyfiles:
        image_name = os.path.basename(file_path)
        image = cv2.imread(file_path)
        image_bbox = face_model.get_bbox(image)
        print(image_bbox)
        if image_bbox == [0, 0, 1, 1]:
            # No face detected: keep the image aside for inspection.
            dst_path_image = join(dst_dir, "not_detect_face")
            if not exists(dst_path_image):
                os.makedirs(dst_path_image)
            cv2.imwrite(join(dst_path_image, image_name), image)
        else:
            # if you have n classes => prediction = np.zeros((1, n))
            prediction = np.zeros((1, num_classes))
            test_speed = 0
            # sum the predictions of every model in model_dir
            count_model = 0
            for model_name in os.listdir(model_dir):
                h_input, w_input, model_type, scale = parse_model_name(model_name)
                param = {
                    "org_img": image,
                    "bbox": image_bbox,
                    "scale": scale,
                    "out_w": w_input,
                    "out_h": h_input,
                    "crop": True,
                }
                if scale is None:
                    param["crop"] = False
                img = image_cropper.crop(**param)
                start = time.time()
                prediction += model_test.predict(img, os.path.join(model_dir, model_name))
                count_model = count_model + 1
                test_speed += time.time() - start
            # draw result of prediction
            label = np.argmax(prediction)
            value = prediction[0][label] / count_model
            if label == 1:
                label_text = "Image '{}' is Real Face. Score: {:.2f}.".format(image_name, value)
                result_text = "RealFace Score: {:.2f}".format(value)
                color = (255, 0, 0)
            else:
                label_text = "Image '{}' is Fake Face. Score: {:.2f}.".format(image_name, value)
                result_text = "FakeFace Score: {:.2f}".format(value)
                color = (0, 0, 255)
            print(label_text)
            print("Prediction cost {:.2f} s".format(test_speed))
            if draw_bbox:
                cv2.rectangle(
                    image,
                    (image_bbox[0], image_bbox[1]),
                    (image_bbox[0] + image_bbox[2], image_bbox[1] + image_bbox[3]),
                    color, 2)
                cv2.putText(
                    image, result_text,
                    (image_bbox[0], image_bbox[1] - 5),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5 * image.shape[0] / 1024, color)
            dst_path_image = join(dst_dir, str(label))
            if not exists(dst_path_image):
                os.makedirs(dst_path_image)
            format_ = os.path.splitext(image_name)[-1]
            result_image_name = image_name.replace(format_, "_result" + format_)
            cv2.imwrite(os.path.join(dst_path_image, result_image_name), image)
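# Illustrative driver for the folder-level test() above; every path, the
# device id and the class count are placeholders, not values from this repo.
def run_folder_test():
    test(model_dir="./resources/anti_spoof_models",
         device_id=0,
         num_classes=3,
         src_dir="./images/raw",
         dst_dir="./images/classified",
         draw_bbox=True)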