def test_confignet_finetune(test_asset_dir, model_dir, resolution):
    model_path = os.path.join(model_dir, "confignet_%d" % resolution,
                              "model.json")
    model = ConfigNet.load(model_path)

    normalized_image = get_normalized_test_image(test_asset_dir,
                                                 (resolution, resolution))

    with tf.device('/cpu:0'):
        embedding, rotation = model.fine_tune_on_img(
            normalized_image[np.newaxis], n_iters=1)
        decoded_image = model.generate_images(embedding, rotation)

    reference_value_file = os.path.join(
        test_asset_dir, "confignet_finetune_ref_%d.npz" % resolution)
    # set to True to save results as reference
    save_reference = False
    if save_reference:
        np.savez(reference_value_file,
                 embedding=embedding,
                 rotation=rotation,
                 decoded_image=decoded_image)

    reference_vals = np.load(reference_value_file)
    assert np.allclose(embedding, reference_vals["embedding"])
    assert np.allclose(rotation, reference_vals["rotation"])
    assert np.allclose(decoded_image, reference_vals["decoded_image"])
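# The get_normalized_test_image helper used above is not shown in this listing.
# A minimal sketch of what it might look like -- the test image file name and
# the [0, 1] scaling convention are assumptions, not confirmed by the example:
def get_normalized_test_image(test_asset_dir, resolution):
    # "test_image.png" is a hypothetical file name
    image = cv2.imread(os.path.join(test_asset_dir, "test_image.png"))
    image = cv2.resize(image, resolution)
    return image.astype(np.float32) / 255.0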
Example 2
def get_embedding_with_new_attribute_value(parameter_name:str, latents: np.ndarray, confignet_model: ConfigNet) -> np.ndarray:
    '''Samples a new value of the currently controlled face attribute and sets it in the latent embedding'''

    new_param_value = confignet_model.facemodel_param_distributions[parameter_name].sample(1)[0]
    modified_latents = confignet_model.set_facemodel_param_in_latents(latents, parameter_name, new_param_value)

    return modified_latents
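# A usage sketch for the function above: resample the expression blendshapes of
# an encoded face. "blendshape_values" is a parameter name that appears in other
# examples in this listing; the image variable is a placeholder:
latents, rotations = confignet_model.encode_images(input_images)
new_latents = get_embedding_with_new_attribute_value("blendshape_values", latents, confignet_model)
new_images = confignet_model.generate_images(new_latents, rotations)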
Example 3
def set_gaze_direction_in_embedding(latents: np.ndarray, eye_pose: np.ndarray, confignet_model: ConfigNet) -> np.ndarray:
    '''Sets the selected eye pose in the specified latent variables.
       This is accomplished by passing the eye pose through the synthetic data encoder
       and setting the corresponding part of the latent vector.
    '''
    latents = confignet_model.set_facemodel_param_in_latents(latents, "bone_rotations:left_eye", eye_pose)
    return latents
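# Usage sketch, assuming the eye pose is a (1, 3) rotation vector in radians as
# in the demo code further below (the exact axis convention is an assumption):
eye_pose = np.zeros((1, 3), dtype=np.float32)
eye_pose[0, 0] = np.deg2rad(20.0)  # look roughly 20 degrees to one side
latents = set_gaze_direction_in_embedding(latents, eye_pose, confignet_model)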
Example 4
def get_new_embeddings(input_images, latentgan_model: LatentGAN,
                       confignet_model: ConfigNet):
    '''Samples new embeddings either:
        - from the LatentGAN, if no input images were provided
        - by embedding the input images into the latent space using the real encoder
    '''
    if input_images is None:
        embeddings = latentgan_model.generate_latents(1, truncation=0.7)
        rotations = np.zeros((1, 3), dtype=np.float32)
        orig_images = confignet_model.generate_images(embeddings, rotations)
    else:
        sample_indices = np.random.randint(0, len(input_images), 1)
        orig_images = np.array([input_images[x] for x in sample_indices])
        embeddings, rotations = confignet_model.encode_images(orig_images)

    return embeddings, rotations, orig_images
Example 5
def get_new_embeddings(args, input_images, latentgan_model: LatentGAN, confignet_model: ConfigNet):
    '''Samples new embeddings either:
        - from the LatentGAN, if no input images were provided
        - by embedding the input images into the latent space using the real encoder
    '''
    if input_images is None:
        n_samples = args.n_rows * args.n_cols
        embeddings = latentgan_model.generate_latents(n_samples, truncation=0.7)
        rotations = np.zeros((n_samples, 3), dtype=np.float32)
        orig_images = confignet_model.generate_images(embeddings, rotations)
    else:
        # special case for one image so the demo is faster and nicer
        if len(input_images) == 1:
            args.n_rows = 1
            args.n_cols = 1
        n_samples = args.n_rows * args.n_cols
        sample_indices = np.random.randint(0, len(input_images), n_samples)
        orig_images = np.array([input_images[x] for x in sample_indices])
        embeddings, rotations = confignet_model.encode_images(orig_images)

    return embeddings, rotations, orig_images
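# Usage sketch: with no input images, n_rows * n_cols embeddings are drawn from
# the LatentGAN. SimpleNamespace stands in for the parsed command-line args:
from types import SimpleNamespace
demo_args = SimpleNamespace(n_rows=2, n_cols=3)
embeddings, rotations, orig_images = get_new_embeddings(
    demo_args, None, latentgan_model, confignet_model)  # six sampled faces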
Example 6
def test_confignet_basic(test_asset_dir, model_dir, resolution):
    model_path = os.path.join(model_dir, "confignet_%d" % resolution,
                              "model.json")
    model = ConfigNet.load(model_path)

    with tf.device('/cpu:0'):
        normalized_image = get_normalized_test_image(test_asset_dir,
                                                     (resolution, resolution))
        embedding, rotation = model.encode_images(normalized_image[np.newaxis])
        decoded_image = model.generate_images(embedding, rotation)

        n_blendshapes = model.config["facemodel_inputs"]["blendshape_values"][0]

        neutral_expression = np.zeros((1, n_blendshapes), np.float32)
        modified_embedding = model.set_facemodel_param_in_latents(
            embedding, "blendshape_values", neutral_expression)
        decoded_image_modified = model.generate_images(modified_embedding,
                                                       rotation)

    reference_value_file = os.path.join(
        test_asset_dir, "confignet_basic_ref_%d.npz" % resolution)
    # set to True to save results as reference
    save_reference = False
    if save_reference:
        np.savez(reference_value_file,
                 embedding=embedding,
                 rotation=rotation,
                 decoded_image=decoded_image,
                 modified_embedding=modified_embedding,
                 decoded_image_modified=decoded_image_modified)

    reference_vals = np.load(reference_value_file)
    assert np.allclose(embedding, reference_vals["embedding"])
    assert np.allclose(rotation, reference_vals["rotation"])
    assert np.allclose(decoded_image, reference_vals["decoded_image"])
    assert np.allclose(modified_embedding,
                       reference_vals["modified_embedding"])
    assert np.allclose(decoded_image_modified,
                       reference_vals["decoded_image_modified"])
Example 7
def test_latent_gan(model_dir, test_asset_dir, resolution):
    latentgan_model_path = os.path.join(model_dir, "latentgan_%d" % resolution,
                                        "model.json")
    confignet_model_path = os.path.join(model_dir, "confignet_%d" % resolution,
                                        "model.json")

    latentgan = LatentGAN.load(latentgan_model_path)
    confignet = ConfigNet.load(confignet_model_path)

    np.random.seed(0)
    with tf.device('/cpu:0'):
        confignet_latents = latentgan.generate_latents(1)
        generated_imgs = confignet.generate_images(confignet_latents,
                                                   np.zeros((1, 3)))

    reference_value_file = os.path.join(test_asset_dir,
                                        "latentgan_ref_%d.npz" % resolution)
    # set to True to save results as reference
    save_reference = False
    if save_reference:
        np.savez(reference_value_file, generated_imgs=generated_imgs)

    reference_vals = np.load(reference_value_file)
    assert np.allclose(generated_imgs, reference_vals["generated_imgs"])
Example 8
def run(args):
    args = parse_args(args)
    if args.image_path is not None:
        input_images = process_image(args.image_path, args.resolution)
        latentgan_model = None
    else:
        input_images = None
        print("WARNING: no input image directory specified, "
              "embeddings will be sampled using the LatentGAN")
        latentgan_model = LatentGAN.load(args.latent_gan_model_path)
    confignet_model = ConfigNet.load(args.confignet_model_path)

    # Sample latent embeddings from the input images if available; otherwise sample from the LatentGAN
    current_embedding_unmodified, current_rotation, orig_images = get_new_embeddings(
        input_images, latentgan_model, confignet_model)
    # Optionally load the SRGAN generator used to super-resolve the outputs
    if args.enable_sr == 1:
        modelSR = generator()
        modelSR.load_weights('evaluation/weights/srgan/gan_generator.h5')

    yaw_min_angle = -args.max_angle
    pitch_min_angle = -args.max_angle
    yaw_max_angle = args.max_angle
    pitch_max_angle = args.max_angle
    delta_angle = 5

    rotation_offset = np.zeros((1, 3))

    eye_rotation_offset = np.zeros((1, 3))

    facemodel_param_names = list(
        confignet_model.config["facemodel_inputs"].keys())
    # remove eye rotation as in the demo it is controlled separately
    eye_rotation_param_idx = facemodel_param_names.index(
        "bone_rotations:left_eye")
    facemodel_param_names.pop(eye_rotation_param_idx)

    render_input_interp_0 = current_embedding_unmodified
    render_input_interp_1 = current_embedding_unmodified

    interpolation_coef = 0
    # dataset_directory is assumed to be defined elsewhere in the original script
    if not os.path.exists(dataset_directory):
        os.makedirs(dataset_directory)
    # This interpolates between the previous and next set embeddings
    current_renderer_input = render_input_interp_0 * (
        1 - interpolation_coef) + render_input_interp_1 * interpolation_coef
    # Set eye gaze direction as controlled by the user
    current_renderer_input = set_gaze_direction_in_embedding(
        current_renderer_input, eye_rotation_offset, confignet_model)

    # Render the face at every yaw/pitch combination in the configured range
    i = 1
    print('All angles')
    for yaw in range(yaw_min_angle, yaw_max_angle + 1, delta_angle):
        for pitch in range(pitch_min_angle, pitch_max_angle + 1, delta_angle):
            rotation_offset[0, 0] = to_rad(yaw)
            rotation_offset[0, 1] = to_rad(pitch)
            generated_imgs = confignet_model.generate_images(
                current_renderer_input, current_rotation + rotation_offset)
            if args.enable_sr == 1:
                img = cv2.resize(generated_imgs[0], (256, 256))
                sr_img = resolve_single(modelSR, img)
                cv2.imwrite(dataset_directory + '/%d_%d.png' % (yaw, pitch),
                            np.array(sr_img))
            else:
                img = cv2.resize(generated_imgs[0], (1024, 1024))
                cv2.imwrite(dataset_directory + '/%d_%d.png' % (yaw, pitch),
                            img)
            print(i)
            i += 1

    # All random: 100 images with random head rotations between -20 and 20 degrees,
    # random eye rotations and random facial expressions
    print('All random')
    current_attribute_name = facemodel_param_names[1]  # blendshape_values
    frame_embedding = render_input_interp_0 * (
        1 - interpolation_coef) + render_input_interp_1 * interpolation_coef
    for i in range(100):
        eye_rotation_offset[0, 2] = to_rad(np.random.randint(-40, 40))
        eye_rotation_offset[0, 0] = to_rad(np.random.randint(-40, 40))
        rotation_offset[0, 0] = to_rad(np.random.randint(-20, 20))
        rotation_offset[0, 1] = to_rad(np.random.randint(-20, 20))
        frame_embedding = set_gaze_direction_in_embedding(
            frame_embedding, eye_rotation_offset, confignet_model)
        new_embedding_value = get_embedding_with_new_attribute_value(
            current_attribute_name, frame_embedding, confignet_model)

        generated_imgs = confignet_model.generate_images(
            new_embedding_value, current_rotation + rotation_offset)

        if args.enable_sr == 1:
            img = cv2.resize(generated_imgs[0], (256, 256))
            sr_img = resolve_single(modelSR, img)
            cv2.imwrite(dataset_directory + '/random_%d.png' % (i),
                        np.array(sr_img))
        else:
            img = cv2.resize(generated_imgs[0], (1024, 1024))
            cv2.imwrite(dataset_directory + '/random_%d.png' % (i), img)
        print(i)
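# The to_rad helper used throughout this example is not shown. Assuming it is a
# plain degrees-to-radians conversion, a one-line sketch:
def to_rad(degrees):
    return degrees * np.pi / 180.0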
Example 9
def run(args):
    print_intro()
    print_instructions()

    args = parse_args(args)
    if args.image_path is not None:
        input_images = process_images(args.image_path, args.resolution)
        latentgan_model = None
    else:
        input_images = None
        print("WARNING: no input image directory specified, "
              "embeddings will be sampled using the LatentGAN")
        latentgan_model = LatentGAN.load(args.latent_gan_model_path)
    confignet_model = ConfigNet.load(args.confignet_model_path)

    basic_ui = BasicUI(confignet_model)

    # Sample latent embeddings from the input images if available; otherwise sample from the LatentGAN
    current_embedding_unmodified, current_rotation, orig_images = get_new_embeddings(
        args, input_images, latentgan_model, confignet_model)
    # Set next embedding value for rendering
    basic_ui.set_next_embeddings(current_embedding_unmodified)

    while not basic_ui.exit:
        # This interpolates between the previous and next set embeddings
        current_renderer_input = basic_ui.get_current_frame_embeddings()
        # Set eye gaze direction as controlled by the user
        current_renderer_input = set_gaze_direction_in_embedding(
            current_renderer_input, basic_ui.eye_rotation_offset,
            confignet_model)

        generated_imgs = confignet_model.generate_images(
            current_renderer_input,
            current_rotation + basic_ui.rotation_offset)

        white_strip = np.full(
            (generated_imgs.shape[0], generated_imgs.shape[1], 20, 3), 255,
            np.uint8)
        visualization_imgs = np.dstack(
            (orig_images, generated_imgs, white_strip))

        image_matrix = build_image_matrix(visualization_imgs, args.n_rows,
                                          args.n_cols)

        basic_ui.perform_per_frame_actions()

        if not args.test_mode:
            cv2.imshow("img", image_matrix)
        key = cv2.waitKey(1)

        key = basic_ui.drive_ui(key, args.test_mode)

        if key == ord(" ") or args.test_mode:
            current_embedding_unmodified, current_rotation, orig_images = get_new_embeddings(
                args, input_images, latentgan_model, confignet_model)
            basic_ui.set_next_embeddings(current_embedding_unmodified)
        if key == ord("v") or args.test_mode:
            basic_ui.set_next_embeddings(current_embedding_unmodified)
        if key == ord("x") or args.test_mode:
            current_attribute_name = basic_ui.facemodel_param_names[
                basic_ui.controlled_param_idx]
            new_embedding_value = get_embedding_with_new_attribute_value(
                current_attribute_name,
                basic_ui.get_current_frame_embeddings(), confignet_model)
            basic_ui.set_next_embeddings(new_embedding_value)
        if key == ord("b") or args.test_mode:
            if input_images is None or len(input_images) != 1:
                print(
                    "For one-shot learning to work you need to specify a single input image path"
                )
                continue
            if args.test_mode:
                n_fine_tuning_iters = 1
            else:
                n_fine_tuning_iters = 50
            print(
                "Fine tuning generator on single image, this might take a minute or two"
            )
            current_embedding_unmodified, current_rotation = confignet_model.fine_tune_on_img(
                input_images[0], n_fine_tuning_iters)
            basic_ui.set_next_embeddings(current_embedding_unmodified)
        if key == ord("h") or args.test_mode:
            print_intro()
            basic_ui.print_instructions()
            print_instructions()

        if args.test_mode:
            break
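# The build_image_matrix helper used above is not shown in this listing. A
# minimal sketch, under the assumption that it tiles a batch of (N, H, W, 3)
# images into an n_rows x n_cols grid:
def build_image_matrix(images, n_rows, n_cols):
    rows = [np.hstack(images[r * n_cols:(r + 1) * n_cols]) for r in range(n_rows)]
    return np.vstack(rows)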