Example #1
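The listing omits its imports; a plausible set is shown below. The project-local helpers (MTCNNFaceDetector, FaceswapGANModel, FaceTransformer, get_model_params, get_src_landmarks, get_tar_landmarks, landmarks_match_mtcnn, and the RESOLUTION constant) are assumed to come from the surrounding faceswap-GAN code base and are not reproduced here.

import glob
import os
from pathlib import Path

import numpy as np
import matplotlib.pyplot as plt
from keras import backend as K
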
def test_faceswap(person, model_path, test_path, save_path):
    mtcnn_weights_dir = "./mtcnn_weights/"
    fd = MTCNNFaceDetector(sess=K.get_session(), model_path=mtcnn_weights_dir)

    da_config, arch_config, loss_weights, loss_config = get_model_params()

    model = FaceswapGANModel(**arch_config)
    model.load_weights(path=model_path)

    ftrans = FaceTransformer()
    ftrans.set_model(model)

    # Read input image
    test_imgs = glob.glob(test_path + '/*.jpg')
    Path(save_path).mkdir(parents=True, exist_ok=True)

    for test_img in test_imgs:
        input_img = plt.imread(test_img)[..., :3]

        # matplotlib returns float32 in [0, 1] for PNGs; the detector expects uint8
        if input_img.dtype == np.float32:
            print("input_img has dtype np.float32 (the image is probably a PNG); rescaling to uint8.")
            input_img = (input_img * 255).astype(np.uint8)

        # Detect faces; use the first detection and crop it from the input image
        faces, lms = fd.detect_face(input_img)
        if len(faces) == 0:
            continue
        x0, y1, x1, y0, _ = faces[0]  # bounding box as returned by MTCNNFaceDetector
        det_face_im = input_img[int(x0):int(x1), int(y0):int(y1), :]
        try:
            src_landmarks = get_src_landmarks(x0, x1, y0, y1, lms)
            tar_landmarks = get_tar_landmarks(det_face_im)
            aligned_det_face_im = landmarks_match_mtcnn(det_face_im, src_landmarks, tar_landmarks)
        except Exception:
            print("An error occurred during face alignment.")
            aligned_det_face_im = det_face_im
        # plt.imshow(aligned_det_face_im)
        # Transform the detected face with the GAN: "BtoA" converts face B into face A,
        # roi_coverage sets how much of the aligned crop is used, and "adain_xyz"
        # selects AdaIN-based color correction
        result_img, result_rgb, result_mask = ftrans.transform(
            aligned_det_face_im,
            direction="BtoA",
            roi_coverage=0.93,
            color_correction="adain_xyz",
            IMAGE_SHAPE=(RESOLUTION, RESOLUTION, 3)
        )
        try:
            result_img = landmarks_match_mtcnn(result_img, tar_landmarks, src_landmarks)
            result_rgb = landmarks_match_mtcnn(result_rgb, tar_landmarks, src_landmarks)
            result_mask = landmarks_match_mtcnn(result_mask, tar_landmarks, src_landmarks)
        except Exception:
            print("An error occurred during face alignment.")

        # Blend the transformed face back into the original image using the predicted mask
        result_input_img = input_img.copy()
        mask = result_mask.astype(np.float32) / 255
        result_input_img[int(x0):int(x1), int(y0):int(y1), :] = (
            mask * result_rgb
            + (1 - mask) * result_input_img[int(x0):int(x1), int(y0):int(y1), :]
        )

        img_name = os.path.basename(test_img)
        plt.imshow(result_input_img)
        plt.imsave(f'{save_path}/{img_name}', result_input_img)
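
A minimal driver for the function above might look like the sketch below. The paths and the RESOLUTION value are placeholders, not part of the original listing; RESOLUTION must match the input size the FaceswapGAN checkpoint was trained with, and the person argument is not used inside the function body.

RESOLUTION = 64  # assumed; must match the trained model's input resolution

test_faceswap(
    person="personB",            # illustrative label only; unused by the function
    model_path="./models/",      # directory containing the saved generator weights
    test_path="./test_images/",  # folder of .jpg images to convert
    save_path="./results/",      # output folder, created if missing
)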
Example #2
        # (fragment) rescale a position from the 180x108 eye patch back to full-image coordinates
        _recalPos.append(
            [int(_pos[0] / 180 * imagew),
             int(_pos[1] / 108 * imageh)])
    return _recalPos
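
The rest of this example assumes the following imports; the project-local MTCNNFaceDetector and KerasELG classes come from the surrounding project, and their import paths are not shown in the original listing.

import cv2
import numpy as np
from keras import backend as K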


mtcnn_weights_dir = "./mtcnn_weights/"
fd = MTCNNFaceDetector(sess=K.get_session(), model_path=mtcnn_weights_dir)

model = KerasELG()
model.net.load_weights("./elg_weights/elg_keras.h5")

fn = "./240281904_1.jpg"
input_img = cv2.imread(fn)[..., ::-1]  # OpenCV loads BGR; reverse channels to RGB

face, lms = fd.detect_face(input_img)  # assuming there is only one face in the input image
assert len(face) >= 1, "No face detected"

if len(face) > 1:
    # Multiple detections: lms has one column per face, so take the first face's landmarks
    left_eye_xy = np.array([lms[6][0], lms[1][0]])
    right_eye_xy = np.array([lms[5][0], lms[0][0]])
else:
    left_eye_xy = np.array([lms[6], lms[1]])
    right_eye_xy = np.array([lms[5], lms[0]])
print(right_eye_xy)

dist_eyes = np.linalg.norm(left_eye_xy - right_eye_xy)
# Eye bounding box derived from the inter-eye distance
eye_bbox_w = dist_eyes / 1.25
eye_bbox_h = eye_bbox_w * 0.6
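
The eye bounding box computed above is typically used to crop a patch around each eye centre before it is passed to the ELG network. The sketch below illustrates that step under the assumption that the network expects a 180x108 single-channel patch (the same 180/108 constants used by the rescaling helper at the top of this example); crop_eye_patch is a hypothetical helper, and the exact normalisation and model.net.predict() call are not part of the original listing.

def crop_eye_patch(img, eye_xy, bbox_w, bbox_h):
    # eye_xy is (y, x); crop a bbox_h x bbox_w window centred on the eye
    y, x = int(eye_xy[0]), int(eye_xy[1])
    h, w = int(bbox_h), int(bbox_w)
    patch = img[max(y - h // 2, 0):y + h // 2, max(x - w // 2, 0):x + w // 2]
    patch = cv2.cvtColor(patch, cv2.COLOR_RGB2GRAY)  # single-channel input (assumed)
    patch = cv2.equalizeHist(patch)                  # assumed preprocessing step
    return cv2.resize(patch, (180, 108))             # (width, height) of the ELG input

left_eye_patch = crop_eye_patch(input_img, left_eye_xy, eye_bbox_w, eye_bbox_h)
right_eye_patch = crop_eye_patch(input_img, right_eye_xy, eye_bbox_w, eye_bbox_h)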