Code example #1
import sys

from keras import backend as K

# Project-local imports; module paths assume the shaoanlu/faceswap-GAN repo layout.
from networks.faceswap_gan_model import FaceswapGANModel
from detector.face_detector import MTCNNFaceDetector
from converter.video_converter import VideoConverter


def conversion(video_path_A):
    if len(sys.argv) <= 2:
        output_video_path_A = "./Aout.mp4"
    else:
        output_video_path_A = sys.argv[2]  # was sys.argv[3], an IndexError when len(sys.argv) == 3

    K.set_learning_phase(0)

    # Input/Output resolution
    RESOLUTION = 64 # 64x64, 128x128, 256x256
    assert (RESOLUTION % 64) == 0, "RESOLUTION should be 64, 128, or 256"

    # Architecture configuration
    arch_config = {}
    arch_config['IMAGE_SHAPE'] = (RESOLUTION, RESOLUTION, 3)
    arch_config['use_self_attn'] = True
    arch_config['norm'] = "instancenorm" # instancenorm, batchnorm, layernorm, groupnorm, none
    arch_config['model_capacity'] = "standard" # standard, lite

    model = FaceswapGANModel(**arch_config)

    model.load_weights(path="./models")

    mtcnn_weights_dir = "./mtcnn_weights/"

    fd = MTCNNFaceDetector(sess=K.get_session(), model_path=mtcnn_weights_dir)
    vc = VideoConverter()

    vc.set_face_detector(fd)
    vc.set_gan_model(model)

    options = {
        # ===== Fixed =====
        "use_smoothed_bbox": True,
        "use_kalman_filter": True,
        "use_auto_downscaling": False,
        "bbox_moving_avg_coef": 0.65,
        "min_face_area": 35 * 35,
        "IMAGE_SHAPE": model.IMAGE_SHAPE,
        # ===== Tunable =====
        "kf_noise_coef": 3e-3,
        "use_color_correction": "hist_match",
        "detec_threshold": 0.7,
        "roi_coverage": 0.9,
        "enhance": 0.5,
        "output_type": 3,
        "direction": "AtoB",
    }

    input_fn = video_path_A
    output_fn = output_video_path_A
    duration = None
    vc.convert(input_fn=input_fn, output_fn=output_fn, options=options, duration=duration)
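
A minimal driver for conversion() might look like this (hypothetical; it assumes the function is defined in a script invoked with the input video as argv[1] and an optional output path as argv[2]):

if __name__ == "__main__":
    # sys.argv[1]: input video; sys.argv[2] (optional) is read inside conversion()
    conversion(sys.argv[1])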
Code example #2
        "bbox_moving_avg_coef": 0.70,  # 0.65
        "min_face_area": 128 * 128,
        "IMAGE_SHAPE": model.IMAGE_SHAPE,
        # ===== Tunable =====
        "kf_noise_coef": 1e-3,
        "use_color_correction": "hist_match",
        "detec_threshold": 0.8,
        "roi_coverage": 0.92,
        "enhance": 0.,
        "output_type": 1,
        "direction":
        "BtoA",  # ==================== This line determines the transform direction ====================
    }

    model.load_weights(path=models_dir)
    fd = MTCNNFaceDetector(sess=K.get_session(), model_path="./mtcnn_weights/")
    vc = VideoConverter()
    vc.set_face_detector(fd)
    vc.set_gan_model(model)
    vc._init_kalman_filters(options["kf_noise_coef"])

    # fn_source_video = "obama_new.mp4"
    # fn_target_video = 'zyl.mp4'
    #
    # if options["direction"] == "AtoB":
    #     input_fn = fn_source_video
    #     output_fn = "OUTPUT_VIDEO_AtoB.mp4"
    # elif options["direction"] == "BtoA":
    #     input_fn = fn_target_video
    #     output_fn = "OUTPUT_VIDEO_BtoA.mp4"
    #
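
Uncommented, the direction-based file selection sketched in the comments above would look like this (filenames taken from the comments; vc.convert() usage as in code example #1):

fn_source_video = "obama_new.mp4"
fn_target_video = "zyl.mp4"

if options["direction"] == "AtoB":
    input_fn, output_fn = fn_source_video, "OUTPUT_VIDEO_AtoB.mp4"
elif options["direction"] == "BtoA":
    input_fn, output_fn = fn_target_video, "OUTPUT_VIDEO_BtoA.mp4"

vc.convert(input_fn=input_fn, output_fn=output_fn, options=options, duration=None)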
Code example #3
import glob
import os
from pathlib import Path

import numpy as np
import matplotlib.pyplot as plt
from keras import backend as K

# Project-local imports; module paths assume the shaoanlu/faceswap-GAN repo layout.
from networks.faceswap_gan_model import FaceswapGANModel
from detector.face_detector import MTCNNFaceDetector
from converter.face_transformer import FaceTransformer
# get_model_params, get_src_landmarks, get_tar_landmarks and landmarks_match_mtcnn
# are assumed to be project-local helpers importable from the surrounding module.


def test_faceswap(person, model_path, test_path, save_path):
    # NOTE: `person` is accepted but unused in this example.
    mtcnn_weights_dir = "./mtcnn_weights/"
    fd = MTCNNFaceDetector(sess=K.get_session(), model_path=mtcnn_weights_dir)

    da_config, arch_config, loss_weights, loss_config = get_model_params()
    RESOLUTION = arch_config['IMAGE_SHAPE'][0]  # needed by ftrans.transform() below

    model = FaceswapGANModel(**arch_config)
    model.load_weights(path=model_path)

    ftrans = FaceTransformer()
    ftrans.set_model(model)

    # Gather input image paths
    test_imgs = glob.glob(test_path + '/*.jpg')
    Path(save_path).mkdir(parents=True, exist_ok=True)

    for test_img in test_imgs:
        input_img = plt.imread(test_img)[..., :3]

        if input_img.dtype == np.float32:
            print("input_img has dtype np.float32 (perhaps the image format is PNG). Scale it to uint8.")
            input_img = (input_img * 255).astype(np.uint8)

        # Display detected face
        faces, lms = fd.detect_face(input_img)
        if len(faces) == 0:
            continue
        x0, y1, x1, y0, _ = faces[0]
        det_face_im = input_img[int(x0):int(x1), int(y0):int(y1), :]
        try:
            src_landmarks = get_src_landmarks(x0, x1, y0, y1, lms)
            tar_landmarks = get_tar_landmarks(det_face_im)
            aligned_det_face_im = landmarks_match_mtcnn(det_face_im, src_landmarks, tar_landmarks)
        except Exception:
            print("An error occurred during face alignment.")
            aligned_det_face_im = det_face_im
        # plt.imshow(aligned_det_face_im)
        # Transform detected face
        result_img, result_rgb, result_mask = ftrans.transform(
            aligned_det_face_im,
            direction="BtoA",
            roi_coverage=0.93,
            color_correction="adain_xyz",
            IMAGE_SHAPE=(RESOLUTION, RESOLUTION, 3)
        )
        try:
            result_img = landmarks_match_mtcnn(result_img, tar_landmarks, src_landmarks)
            result_rgb = landmarks_match_mtcnn(result_rgb, tar_landmarks, src_landmarks)
            result_mask = landmarks_match_mtcnn(result_mask, tar_landmarks, src_landmarks)
        except Exception:
            print("An error occurred during face alignment.")

        # Alpha-blend the transformed face back into the original image
        mask = result_mask.astype(np.float32) / 255
        result_input_img = input_img.copy()
        roi = result_input_img[int(x0):int(x1), int(y0):int(y1), :]
        result_input_img[int(x0):int(x1), int(y0):int(y1), :] = mask * result_rgb + (1 - mask) * roi

        img_name = os.path.basename(test_img)
        plt.imshow(result_input_img)
        plt.imsave(f'{save_path}/{img_name}', result_input_img)
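
An example invocation (paths are illustrative; as noted above, the person argument is unused by the function):

test_faceswap(person="B",
              model_path="./models",
              test_path="./test_images",
              save_path="./results")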
Code example #4
import numpy as np
import cv2
import tensorflow as tf
from keras import backend as K

# Project-local imports; module paths assume the shaoanlu/faceswap-GAN repo layout.
from networks.faceswap_gan_model import FaceswapGANModel
from detector.face_detector import MTCNNFaceDetector
from converter.video_converter import VideoConverter
# base64_to_image, image_to_base64 and normalization are assumed to be
# project-local helpers importable from the surrounding module.


def face_transform():
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # don't grab all GPU memory; allocate on demand
    session = tf.Session(config=config)
    # Register the session with Keras
    K.set_session(session)
    models_dir = "../models"
    RESOLUTION = 256  # 64x64, 128x128, 256x256
    # Architecture configuration
    arch_config = {}
    arch_config['IMAGE_SHAPE'] = (RESOLUTION, RESOLUTION, 3)
    arch_config['use_self_attn'] = True

    arch_config['norm'] = "instancenorm"  # instancenorm, batchnorm, layernorm, groupnorm, none
    arch_config['model_capacity'] = "standard"  # standard, lite

    model = FaceswapGANModel(**arch_config)

    options = {
        # ===== Fixed =====
        "use_smoothed_bbox": True,
        "use_kalman_filter": True,
        "use_auto_downscaling": False,
        "bbox_moving_avg_coef": 0.70,  # 0.65
        "min_face_area": 128 * 128,
        "IMAGE_SHAPE": model.IMAGE_SHAPE,
        # ===== Tunable =====
        "kf_noise_coef": 1e-3,
        "use_color_correction": "hist_match",
        "detec_threshold": 0.8,
        "roi_coverage": 0.92,
        "enhance": 0.,
        "output_type": 1,
        "direction":
        "BtoA",  # ==================== This line determines the transform direction ====================
    }

    model.load_weights(path=models_dir)
    fd = MTCNNFaceDetector(sess=K.get_session(),
                           model_path="../mtcnn_weights/")
    vc = VideoConverter()
    vc.set_face_detector(fd)
    vc.set_gan_model(model)
    vc._init_kalman_filters(options["kf_noise_coef"])

    def transform(imageB64):
        """
        :param imageB64: base64-encoded input image
        :return: the transformed image as a nested list of RGB values
        """
        rgb_img = base64_to_image(imageB64)

        result = vc.process_video(rgb_img, options)
        result = normalization(result)

        return np.array(result).tolist()
        # Alternative: return a base64-encoded BGR image instead
        # r, g, b = cv2.split(result)
        # img_bgr = cv2.merge([b, g, r])
        # res_base64 = image_to_base64(img_bgr)
        # return res_base64

    return transform
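
A sketch of how the returned closure could be used (hypothetical; assumes base64_to_image() accepts a plain base64 string of an encoded image):

import base64

transform = face_transform()
with open("face.jpg", "rb") as f:
    img_b64 = base64.b64encode(f.read()).decode()
pixels = transform(img_b64)  # nested list of RGB values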
Code example #5
File: rtmp_queue.py    Project: houzhengzhang/faceswap
import os
import time

import numpy as np
import tensorflow as tf
from keras import backend as K

# Project-local imports; module paths assume the shaoanlu/faceswap-GAN repo layout.
from networks.faceswap_gan_model import FaceswapGANModel
from detector.face_detector import MTCNNFaceDetector
from converter.video_converter import VideoConverter
# normalization is assumed to be a project-local helper.


def transform_img(inStack, outStack):
    print('Process to transform: %s' % os.getpid(), time.time())

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # don't grab all GPU memory; allocate on demand
    session = tf.Session(config=config)
    # Register the session with Keras
    K.set_session(session)

    models_dir = "../models_transform"
    RESOLUTION = 256  # 64x64, 128x128, 256x256
    # Architecture configuration
    arch_config = {}
    arch_config['IMAGE_SHAPE'] = (RESOLUTION, RESOLUTION, 3)
    arch_config['use_self_attn'] = True
    # TODO: make the normalization configurable
    arch_config['norm'] = "instancenorm"  # instancenorm, batchnorm, layernorm, groupnorm, none
    arch_config['model_capacity'] = "standard"  # standard, lite

    model = FaceswapGANModel(**arch_config)

    options = {
        # ===== Fixed =====
        "use_smoothed_bbox": True,
        "use_kalman_filter": True,
        "use_auto_downscaling": False,
        "bbox_moving_avg_coef": 0.65,  # 0.65
        "min_face_area": 35 * 35,
        "IMAGE_SHAPE": model.IMAGE_SHAPE,
        # ===== Tunable =====
        "kf_noise_coef": 1e-3,
        "use_color_correction": "hist_match",
        "detec_threshold": 0.8,
        "roi_coverage": 0.90,
        "enhance": 0.,
        "output_type": 1,
        "direction":
        "BtoA",  # ==================== This line determines the transform direction ====================
    }

    model.load_weights(path=models_dir)
    fd = MTCNNFaceDetector(sess=K.get_session(),
                           model_path="../mtcnn_weights/")
    vc = VideoConverter()
    vc.set_face_detector(fd)
    vc.set_gan_model(model)
    vc._init_kalman_filters(options["kf_noise_coef"])

    while True:  # NOTE: busy-waits while inStack is empty
        if len(inStack) != 0:
            start_time = time.time()
            rgb_img = inStack.pop()
            # print("transform_img inputQ size ;",inputQ.qsize())
            # Get the transformed face
            result = vc.process_video(rgb_img, options)
            result = normalization(result) * 255
            result = np.uint8(result)

            print(time.time() - start_time)
            outStack.append(result)
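
A minimal sketch of how this worker could be wired up, assuming inStack/outStack are multiprocessing.Manager().list() proxies shared with reader and writer processes:

from multiprocessing import Manager, Process

manager = Manager()
in_stack, out_stack = manager.list(), manager.list()
# Frames pushed onto in_stack by a reader process are popped, transformed,
# and appended to out_stack by the worker.
Process(target=transform_img, args=(in_stack, out_stack)).start()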
Code example #6
import os

from keras import backend as K

from preprocess import preprocess_video
# MTCNNFaceDetector lives in detector/face_detector.py in the faceswap-GAN layout.
from detector.face_detector import MTCNNFaceDetector

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

TOTAL_ITERS = 34000

ML_Server_Trump_Video_Path = '/home/ronczka/Data/Trump/Rede.mp4'
ML_Server_Me_Video_Path = '/home/ronczka/Data/Me/test_videp.mp4'

# Raw strings: the '\t' in the original non-raw literal would be parsed as a tab.
Local_Trump_Video_Path = r'D:\QSync\Master_Studium\Semester3\Hauptseminar\Data\Trump\Rede.mp4'
Local_Me_Video_Path = r'D:\QSync\Master_Studium\Semester3\Hauptseminar\Data\Video_me\test_videp.mp4'

sess = K.get_session()

fd = MTCNNFaceDetector(sess=sess, model_path="./mtcnn_weights/")

fn_source_video = ML_Server_Me_Video_Path
print(os.path.exists(fn_source_video))
fn_target_video = ML_Server_Trump_Video_Path
print(os.path.exists(fn_target_video))  # was re-checking fn_source_video

os.makedirs("faceA/rgb")
os.makedirs("faceA/binary_mask")
os.makedirs("faceB/rgb")
os.makedirs("faceB/binary_mask")

save_interval = 5  # perform face detection every {save_interval} frames
save_path = "./faceA/"