def verify():
    backend.clear_session()
    uploaded_files = request.files
    filenames = list(uploaded_files)  # cast the MultiDict keys to a list
    file_items = list(uploaded_files.values())
    # Only one face per image can be detected,
    # so at least two images are required.
    if len(uploaded_files) < 2:
        return jsonify(["don't upload less than 2 pictures"])
    # extract a face from each uploaded image
    faces = [extract_face(f) for f in file_items]
    # the user may upload images without a face
    if any(elem is None for elem in faces):
        return jsonify(["don't upload pictures without faces"])
    embeddings = get_embeddings(faces)
    results = []
    # compare every other embedding against the first one
    for x in range(1, len(embeddings)):
        result_cosine = is_match(embeddings[0], embeddings[x])
        # Cast the comparison result to a plain bool: comparing against the
        # threshold yields a numpy bool, which breaks jsonify serialization.
        r = MatchingResult(x, result_cosine, bool(result_cosine <= threshold))
        results.append(r)
    return jsonify([ob.__dict__ for ob in results])
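# is_match, threshold, and MatchingResult are referenced above but not
# defined in this snippet. A minimal sketch of plausible definitions,
# assuming is_match returns a cosine distance between two embeddings
# (the names, fields, and the 0.5 cutoff are assumptions, not the
# original code):
from dataclasses import dataclass
from scipy.spatial.distance import cosine

threshold = 0.5  # assumed cosine-distance cutoff for "same person"

def is_match(known_embedding, candidate_embedding):
    # cosine distance between two face embeddings; lower means more similar
    return cosine(known_embedding, candidate_embedding)

@dataclass
class MatchingResult:
    index: int       # position of the compared image in the upload order
    distance: float  # cosine distance to the first image's embedding
    match: bool      # True when distance <= threshold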
def post(self):
    parse = reqparse.RequestParser()
    parse.add_argument('source', type=werkzeug.datastructures.FileStorage, location='files')
    parse.add_argument('target', type=werkzeug.datastructures.FileStorage, location='files')
    args = parse.parse_args()

    sourceImage = face.extract_face(args['source'])
    targetImage = face.extract_face(args['target'])

    distance = None
    with graph.as_default():
        sourceEmbedding = face.get_embedding(model, sourceImage)
        targetEmbedding = face.get_embedding(model, targetImage)
        distance = facenet.distance(sourceEmbedding, targetEmbedding)
    return {'distance': distance.tolist()}
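# A minimal sketch of exercising this endpoint from a client. The field
# names 'source' and 'target' come from the parser above; the host and
# route path are assumptions, since the resource registration is not shown.
import requests

with open("a.jpg", "rb") as src, open("b.jpg", "rb") as tgt:
    resp = requests.post(
        "http://localhost:5000/compare",  # assumed route
        files={"source": src, "target": tgt},
    )
print(resp.json())  # e.g. {'distance': [0.83]}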
def faces():
    if request.method == 'POST':
        f = request.files['image']
        filename = f.filename
        path = os.path.join(UPLOAD_FLODER, filename)
        f.save(path)
        w = getwidth(path)
        # crop the face and save it for the prediction page
        px = extract_face(path)
        cv2.imwrite('./static/predict/{}'.format(filename), px)
        name = facenett(path, filename)
        # clean up the temporary .jpg files left in the data folder
        folder_path = r'C:\Users\Por\Desktop\proj\data'
        for image in os.listdir(folder_path):
            if image.endswith(".jpg"):
                os.remove(os.path.join(folder_path, image))
        return render_template('faces.html', fileupload=True,
                               img_name=filename, w=w, name=name)
    return render_template('faces.html', fileupload=False, img_name="freeai.png")
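# getwidth() is called above but not defined in this snippet. A plausible
# sketch using Pillow, assuming it returns a display width for the template
# (the 300px cap is an assumption, not the original behaviour):
from PIL import Image

def getwidth(path):
    width, _ = Image.open(path).size  # size is (width, height)
    return min(width, 300)  # cap the preview width at an assumed 300px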
from keras.models import load_model
import os
import face
import facenet.src.facenet as facenet

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # run on CPU only

# load the model
model = load_model('facenet_keras.h5')
# summarize input and output shapes
print(model.inputs)
print(model.outputs)

image1 = face.extract_face("./Z.jpg")
image2 = face.extract_face("./k.jpg")
image3 = face.extract_face("./KK.jpg")

embedding1 = face.get_embedding(model, image1)
embedding2 = face.get_embedding(model, image2)
embedding3 = face.get_embedding(model, image3)

print("distance")
print("Z-k")
print(facenet.distance(embedding1, embedding2))
print("Z-KK")
print(facenet.distance(embedding1, embedding3))
print("k-KK")
print(facenet.distance(embedding2, embedding3))
print("DONE")
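# The import path facenet.src.facenet suggests the davidsandberg/facenet
# repository, whose distance() appears to default to squared Euclidean
# distance over batches of embeddings. A minimal numpy sketch of that
# assumed default metric for single 1-D embeddings (an illustration, not
# the library's exact implementation):
import numpy as np

def squared_l2_distance(e1, e2):
    # sum of squared element-wise differences between two embeddings
    diff = np.asarray(e1) - np.asarray(e2)
    return float(np.sum(np.square(diff)))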
def swap_faces(FRAME, gray, faces):
    '''
    :param FRAME: Single image frame from the camera
    :param gray: Grayscale image of FRAME
    :param faces: Face rectangles detected by dlib
    :return swapp: Face-swapped image
    '''
    facial_landmarks = np.zeros((n_faces, n_markers, 2))  # landmark matrix
    for m in range(0, n_faces):
        landmarks = predictor(gray, faces[m])  # dlib shape predictor
        for n in range(marker_start, marker_end):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            # cv2.circle(FRAME, (x, y), 4, (0, 0, 255), -1)  # visualize landmarks, BGR
            facial_landmarks[m, n - marker_start] = (x, y)

    # ---------------------- Convex hull (convex.py) ----------------------
    # Find the convex hull of each face, using our own convex hull
    # algorithm (in the style of the Jarvis march).
    face1_hull = get_hull(facial_landmarks[0])
    face2_hull = get_hull(facial_landmarks[1])

    # ----------------- Extract face and mask (face.py) -------------------
    face1_mask, face1 = extract_face(face1_hull, FRAME)
    face2_mask, face2 = extract_face(face2_hull, FRAME)
    if DEBUG:
        cv2.imshow('face1', face1)
        cv2.imshow('face2', face2)
        cv2.imshow('mask1', face1_mask)
        cv2.imshow('mask2', face2_mask)

    # ---------------- Delaunay triangulation (face.py) -------------------
    tri_face1_in_face2 = delaunay_triangulation(face1_hull, facial_landmarks[0],
                                                facial_landmarks[1], FRAME, DEBUG)
    tri_face2_in_face1 = delaunay_triangulation(face2_hull, facial_landmarks[1],
                                                facial_landmarks[0], FRAME, DEBUG)

    # --------------- Affine transform (affine_trans.py) ------------------
    swapp = np.copy(FRAME)
    if DEBUG:
        cv2.imshow('before affine transform and swapping', swapp)
        cv2.waitKey()
    for i in range(len(tri_face1_in_face2[0])):
        morph_affine(tri_face1_in_face2[0][i], tri_face1_in_face2[1][i], FRAME, swapp, DEBUG)
    for i in range(len(tri_face2_in_face1[0])):
        morph_affine(tri_face2_in_face1[0][i], tri_face2_in_face1[1][i], FRAME, swapp, DEBUG)
    if DEBUG:
        cv2.imshow('after affine transform and swapping', swapp)
        cv2.waitKey()

    # -------- Blur face edge, Laplace blending (face.py) -----------------
    # Figure out the blur amount: use facial landmarks 0 and 16 for the
    # face width, see landmark_numbers.png.
    width_face1 = abs(facial_landmarks[0][16][0] - facial_landmarks[0][0][0])  # face width in pixels
    width_face2 = abs(facial_landmarks[1][16][0] - facial_landmarks[1][0][0])
    blur_size = int(((width_face1 + width_face2) / 2) * 0.5)
    if blur_size % 2 == 0:
        blur_size += 1  # blur kernel size must be odd
    swapp = laplace_blend(FRAME, swapp, face1_mask, face2_mask, blur_size)

    return swapp
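# The comments above reference a hand-rolled convex hull in the style of
# the Jarvis march (convex.py itself is not shown). A minimal,
# self-contained sketch of that algorithm; the actual get_hull may differ
# in details such as orientation or collinear-point handling:
import numpy as np

def cross(o, a, b):
    # z-component of (a - o) x (b - o); its sign gives the turn direction
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def jarvis_hull(points):
    pts = np.asarray(points, dtype=float)
    start = int(np.argmin(pts[:, 0]))  # left-most point is always on the hull
    hull, current = [], start
    while True:
        hull.append(pts[current])
        # pick any other point as the initial candidate for the next vertex
        candidate = (current + 1) % len(pts)
        for i in range(len(pts)):
            # if pts[i] lies to the right of current -> candidate,
            # it wraps the point set more tightly, so take it instead
            if cross(pts[current], pts[candidate], pts[i]) < 0:
                candidate = i
        current = candidate
        if current == start:  # wrapped all the way around
            break
    return np.array(hull)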