コード例 #1
0
def main():
    """Render a face appearance map by Lambertian shading of an albedo map
    with a spherical light-probe environment map, then display the raw and
    tone-mapped results and save the raw result as a JPEG.

    Relies on module-level imports/helpers defined elsewhere in this file:
    plt, tf, imageio, np, math, normalMapProcessing, envMapOrientation.
    """
    plt.close("all")
    tf.enable_eager_execution()

    envName = "kitchen"
    envFile = envName + "_probe.hdr"
    albedoMap = imageio.imread("../Data02_isomaps/AppearanceMap_test.png") #("Albedo.png")
    normalMap = imageio.imread("../Data02_isomaps/NormalMap_test.png") #("Normal.png")
    # Reuse envFile instead of rebuilding the same path a second time.
    envMap = imageio.imread(envFile, format='HDR-FI')

    resize_rate = 0.05
    (mIm, nIm, dIm) = albedoMap.shape
    (mEnv, nEnv, dEnv) = envMap.shape
    d = 3  # color channels used downstream

    normalProcessed = normalMapProcessing.processNormalMap(normalMap, mIm, nIm)
    normalizingValue = tf.constant(255.)
    albedoTensor = tf.constant(albedoMap, dtype=tf.float32)
    normalTensor = tf.constant(normalProcessed, dtype=tf.float32)
    envMapTensor = tf.constant(envMap)
    # Drop a possible alpha channel and normalize the 8-bit albedo to [0, 1].
    albedoTensor = tf.scalar_mul(1. / normalizingValue, albedoTensor[:, :, :3])

    # Downsample the environment map so the cosine matmul stays tractable.
    mEnv = int(resize_rate * mEnv)
    nEnv = int(resize_rate * nEnv)
    envMapTensor = tf.image.resize(envMapTensor, tf.constant([mEnv, nEnv]))
    print((mEnv, nEnv))

    plt.imshow(envMapTensor)
    plt.show()

    ## Calculate envMap orientations
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv)
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)

    # Per-pixel normals (mIm*nIm, 3) dotted with per-texel light directions
    # (3, mEnv*nEnv) gives the Lambertian cosine factor for every
    # pixel/light-texel pair.
    normalTensor = tf.reshape(normalTensor, [mIm * nIm, 3])
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])

    cosineTensor = tf.matmul(normalTensor, envOrientationTensor)
    # Back-facing light contributes nothing.
    cosineTensor = tf.clip_by_value(cosineTensor, 0, 1)

    # Sphere-probe normalization: the valid texels cover a disc,
    # i.e. pi/4 of the mEnv*nEnv texel grid.
    envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)

    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    shadedBrightness = tf.matmul(cosineTensor, envMapTensor)
    shadedBrightness = tf.scalar_mul(1. / envNormalization, tf.reshape(shadedBrightness, [mIm, nIm, 3]))

    gamma = 2.2
    resTensor = tf.multiply(albedoTensor, shadedBrightness)
    # FIX: tone mapping applies the *inverse* gamma (1/2.2), consistent with
    # the other pipelines in this file (resTensorGamma = pow(res, invGamma)).
    # The original pow(res, 2.2) darkened the tone-mapped image instead.
    resTensorTM = tf.pow(resTensor, 1. / gamma)
    resTensor = tf.scalar_mul(normalizingValue, resTensor)
    resTensorTM = tf.scalar_mul(normalizingValue, resTensorTM)

    resIm = np.array(resTensor)
    resImTM = np.array(resTensorTM)
    plt.imshow(resIm.astype(int))
    plt.show()
    plt.imshow(resImTM.astype(int))
    plt.show()

    # FIX: clip to the valid 8-bit range and cast before saving; imageio's
    # implicit float conversion would otherwise mangle out-of-range values.
    imageio.imsave("result_" + envName + ".jpg",
                   np.clip(resIm, 0., 255.).astype(np.uint8))
コード例 #2
0
def main():
    """Synthesize an appearance UV map for the "Chicago" sample: load albedo,
    normal and environment maps, render them with FirstGenerator, and write
    the result (plus copies of its inputs) into the Data_synthesized folders.

    NOTE(review): relies on module-level globals defined elsewhere in this
    file (predict_sphere_envMap, high_res_mode, load_rgb, FirstGenerator,
    convert_rgb_to_cv2, envMapOrientation, ...).
    """
    plt.close("all")
    tf.enable_eager_execution()

    # Output layout: Data_synthesized/{AppearanceNormal,EnvMap}/S4_I3_*
    folder_write_name = "Data_synthesized/"
    extension_nor_app = "AppearanceNormal/"
    extension_env = "EnvMap/"
    code_sample = "S4_I3_"

    envDir = "EnvMaps/"
    envName = envDir + "STUDIOATM_13SN"
    # predict_sphere_envMap[1] selects a sphere light probe over a
    # rectangular (lat-long) environment map file.
    if (predict_sphere_envMap[1]):
        envFile = envName + "_probe.hdr"
    else:
        #envFile = folder_write_name + extension_env + code_sample + "Illum.hdr"
        envFile = envName + ".hdr"
    albedoMap = load_rgb(
        "Chicago_albedo.png")  #"Data02_isomaps/AppearanceMap_test.png")
    normalMap = load_rgb(
        "Chicago_normal.png")  #"Data02_isomaps/NormalMap_test.png")
    # -1 flag: presumably "load HDR unchanged" -- TODO confirm load_rgb's API.
    envMap = load_rgb(envFile, -1)
    print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
        np.amax(envMap), np.mean(envMap)))

    # Keep copies of the inputs next to the synthesized output.
    shutil.copy(
        "Chicago_normal.png",
        folder_write_name + extension_nor_app + code_sample + "Normal_UV.png")
    shutil.copy(envFile,
                folder_write_name + extension_env + code_sample + "Illum.hdr")

    (mEnv, nEnv, dEnv) = envMap.shape
    print((mEnv, nEnv))
    plt.imshow(envMap)
    plt.show()
    # Fixed 256x256 working resolution; note cv2.resize takes (width, height).
    albedoMap = cv2.resize(albedoMap, (256, 256))
    normalMap = cv2.resize(normalMap, (256, 256))
    envMap = cv2.resize(envMap, (3 * 32, 3 * 16),
                        interpolation=cv2.INTER_LINEAR)

    print("After resize : max: {:.3f}, mean: ({:.3f})".format(
        np.amax(envMap), np.mean(envMap)))

    (mIm, nIm, dIm) = albedoMap.shape
    (mEnv, nEnv, dEnv) = envMap.shape
    input_shape = albedoMap.shape
    d = 3  # color channels used downstream

    plt.imshow(envMap)
    plt.show()
    plt.imshow(albedoMap)
    plt.show()
    plt.imshow(normalMap)
    plt.show()

    # cv2.namedWindow('albedo', cv2.WINDOW_NORMAL)
    # cv2.imshow('albedo', albedoMap)
    # cv2.namedWindow('normal', cv2.WINDOW_NORMAL)
    # cv2.imshow('normal', normalMap)
    # cv2.namedWindow('envMap', cv2.WINDOW_NORMAL)
    # cv2.imshow('envMap', envMap)
    # cv2.waitKey(0)

    gamma = tf.constant(2.2)
    invGamma = tf.constant(1. / 2.2)
    normalizingValue = tf.constant(255.)
    albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
    normalTensor = tf.constant(normalMap[:, :, :3], dtype=tf.float32)
    envMapTensor = tf.constant(envMap, dtype=tf.float32)
    # Normalize the 8-bit maps to [0, 1]; drop any alpha channel.
    albedoTensor = tf.scalar_mul(1. / normalizingValue, albedoTensor[:, :, :3])
    normalTensor = tf.scalar_mul(1. / normalizingValue, normalTensor[:, :, :3])
    #albedoTensor = tf.pow(albedoTensor,gamma)

    # Sphere probes integrate over a disc (pi/4 of the texel count);
    # lat-long maps average over all texels.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant((float)(mEnv * nEnv))

    ## Calculate envMap orientations
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])

    autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                       envMapTensor, envNormalization,
                                       predict_sphere_envMap, high_res_mode)

    # Add a leading batch dimension of 1 for the generator's render().
    normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
    albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])

    resTensor = autoencoder_model.render(normalTensor, albedoTensor)
    # Tone map (inverse gamma) for display and saving.
    resTensorGamma = tf.pow(resTensor, invGamma)

    res_save = 255. * np.array(resTensorGamma[0])
    # Presumably RGB -> BGR channel order for cv2.imwrite -- verify helper.
    res_save = convert_rgb_to_cv2(res_save)
    res_save = res_save.astype(int)

    plt.imshow(resTensor[0])
    plt.show()
    plt.imshow(resTensorGamma[0])
    plt.show()

    #cv2.imwrite(folder_name+ "envMap_resized.hdr" , envMap)
    cv2.imwrite(
        folder_write_name + extension_nor_app + code_sample +
        "Appearance_UV.png", res_save)
    #cv2.imwrite(folder_name+ "normalMap_resized.png" , normalMap)

    return
コード例 #3
0
def main():
    """Batch-synthesize appearance UV maps for every (subject, illumination)
    code: render each subject's albedo + normal maps under the matching
    environment map with FirstGenerator and write *_Appearance_UV.png
    (RGBA, alpha copied from the albedo map) back into AppearanceNormal/.

    NOTE(review): depends on module-level globals defined elsewhere in this
    file (predict_sphere_envMap, high_res_mode, load_rgb/load_rgba,
    FirstGenerator, convert_rgb_to_cv2, envMapOrientation, ...).
    """
    plt.close("all")
    tf.enable_eager_execution()

    # Subjects S1..S4 crossed with illuminations I1..I3.
    codes = [
        "S1_I1", "S1_I2", "S1_I3", "S2_I1", "S2_I2", "S2_I3", "S3_I1", "S3_I2",
        "S3_I3", "S4_I1", "S4_I2", "S4_I3"
    ]

    # code = "S1_I1"

    for code in codes:

        data_folder = "Data_synthesized/"
        envDir = data_folder + "EnvMap/"
        envName = envDir + code + "_Illum"
        # Sphere probe vs. lat-long environment map file.
        if (predict_sphere_envMap[1]):
            envFile = envName + "_probe.hdr"
        else:
            envFile = envName + ".hdr"

        albedo_folder = data_folder + "Albedo/"
        normal_folder = data_folder + "AppearanceNormal/"
        albedo_file = albedo_folder + code + ".png"
        normal_file = normal_folder + code + "_Normal_UV.png"
        # Albedo keeps its alpha channel; it is re-attached to the output
        # image as a mask further below.
        albedoMap = load_rgba(
            albedo_file)  # "Data02_isomaps/AppearanceMap_test.png")
        normalMap = load_rgb(
            normal_file)  # "Data02_isomaps/NormalMap_test.png")

        data_input_folder_name = "Data_to_synthesize"

        input_shape = (256, 256, 3)
        num_replicate = 1
        # NOTE(review): this dataset is reloaded on every loop iteration but
        # never used in the live code below (only in the commented-out
        # experiment) -- candidate for removal or hoisting out of the loop.
        (dataset_train_input,
         num_train_input) = load_input_data_with_normals_and_replicate(
             data_input_folder_name, input_shape, True, num_replicate)
        # -1 flag: presumably "load HDR unchanged" -- TODO confirm load_rgb.
        envMap = load_rgb(envFile, -1)
        print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))

        (mEnv, nEnv, dEnv) = envMap.shape
        print((mEnv, nEnv))
        plt.imshow(envMap)
        plt.show()
        # Fixed working resolutions; cv2.resize takes (width, height).
        albedoMap = cv2.resize(albedoMap, (256, 256))
        normalMap = cv2.resize(normalMap, (256, 256))
        envMap = cv2.resize(envMap, (64, 32), interpolation=cv2.INTER_LINEAR)

        print("After resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))

        (mIm, nIm, dIm) = albedoMap.shape
        (mEnv, nEnv, dEnv) = envMap.shape
        input_shape = albedoMap.shape
        d = 3  # color channels used downstream

        plt.imshow(envMap)
        plt.show()
        plt.imshow(albedoMap)
        plt.show()
        plt.imshow(normalMap)
        plt.show()

        # cv2.namedWindow('albedo', cv2.WINDOW_NORMAL)
        # cv2.imshow('albedo', albedoMap)
        # cv2.namedWindow('normal', cv2.WINDOW_NORMAL)
        # cv2.imshow('normal', normalMap)
        # cv2.namedWindow('envMap', cv2.WINDOW_NORMAL)
        # cv2.imshow('envMap', envMap)
        # cv2.waitKey(0)

        gamma = tf.constant(2.2)
        invGamma = tf.constant(1. / 2.2)
        normalizingValue = tf.constant(255.)
        albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
        normalTensor = tf.constant(normalMap[:, :, :3], dtype=tf.float32)
        envMapTensor = tf.constant(envMap, dtype=tf.float32)
        # Normalize the 8-bit maps to [0, 1]; drop alpha.
        albedoTensor = tf.scalar_mul(1. / normalizingValue,
                                     albedoTensor[:, :, :3])
        normalTensor = tf.scalar_mul(1. / normalizingValue,
                                     normalTensor[:, :, :3])
        # albedoTensor = tf.pow(albedoTensor,gamma)

        # Sphere probes integrate over a disc (pi/4 of the texel count);
        # lat-long maps average over all texels.
        if (predict_sphere_envMap[1]):
            envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
        else:
            envNormalization = tf.constant((float)(mEnv * nEnv))

        ## Calculate envMap orientations
        envVectors = envMapOrientation.envMapOrientation(
            mEnv, nEnv, predict_sphere_envMap[1])
        envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
        envOrientationTensor = tf.reshape(envOrientationTensor,
                                          [3, mEnv * nEnv])
        envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])

        autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                           envMapTensor, envNormalization,
                                           predict_sphere_envMap,
                                           high_res_mode)

        # Leading batch dimension of 1 for the generator.
        normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
        albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])

        # resTensor = autoencoder_model.render(normalTensor,albedoTensor)
        resTensor = autoencoder_model.render_with_predicted_envMap(
            normalTensor, albedoTensor,
            tf.reshape(envMapTensor, [num_replicate, mEnv * nEnv, 3]))
        # Tone map (inverse gamma) for saving/display.
        resTensorGamma = tf.pow(resTensor, invGamma)

        res_save = 255. * np.array(resTensorGamma[0])
        # Presumably RGB -> BGR for cv2.imwrite -- verify helper.
        res_save = convert_rgb_to_cv2(res_save)
        # Re-attach the albedo's alpha channel as the 4th plane (mask).
        res_save_temp = np.zeros([res_save.shape[0], res_save.shape[1], 4])
        res_save_temp[:, :, :3] = res_save[:, :, :]
        res_save_temp[:, :, 3] = albedoMap[:, :, 3]
        res_save = res_save_temp.astype(int)

        plt.imshow(resTensor[0])
        plt.show()
        plt.imshow(resTensorGamma[0])
        plt.show()

        cv2.imwrite(normal_folder + code + "_Appearance_UV.png", res_save)

    # x_train_input = dataset_train_input.batch(num_replicate)
    # for (batch, (inputs, labels_appearance, labels_normals, masks)) \
    #         in enumerate(x_train_input):
    #     resTensor = autoencoder_model.render_with_predicted_envMap(labels_normals, inputs,
    #                                                                tf.reshape(envMapsTensors,
    #                                                                           [num_replicate, mEnv * nEnv, 3]))
    #     resTensorGamma = tf.pow(resTensor, invGamma)
    #     res_save = 255. * np.array(resTensorGamma[0])
    #     res_save = convert_rgb_to_cv2(res_save)
    #     res_save_temp = np.zeros([res_save.shape[0], res_save.shape[1], 4])
    #     res_save_temp[:, :, :3] = res_save[:, :, :]
    #     res_save_temp[:, :, 3] = albedoMap[:, :, 3]
    #     res_save = res_save_temp.astype(int)
    #
    #     plt.imshow(envMapsTensors[0])
    #     plt.show()
    #     plt.imshow(resTensorGamma[0])
    #     plt.show()
    #     plt.imshow(envMapsTensors[1])
    #     plt.show()
    #     plt.imshow(resTensorGamma[1])
    #     plt.show()
    #
    # return

    return
コード例 #4
0
def main():
    """Train SecondGenerator on synthesized data with environment-map
    supervision: per-step gradient updates, periodic testing, TensorBoard
    summaries, intermediate image dumps, and a final checkpoint save.

    NOTE(review): depends on many module-level globals defined elsewhere in
    this file (high_res_mode, learnable_envMap_size, predict_sphere_envMap,
    batch_size, epochs, gamma, invGamma, load_pre_trained_model,
    test/show/log/write_every_n_steps, tonemap, save_tensor_cv2, loaders,
    SecondGenerator, ...).
    """
    input_shape = (256, 256, 3)
    shape_gt_adv = (512, 512, 3)
    mIm = input_shape[0]
    nIm = input_shape[1]
    # In high-res mode predictions are 512x512 while inputs stay 256x256.
    if (high_res_mode):
        mIm_res = shape_gt_adv[0]
        nIm_res = shape_gt_adv[1]
    else:
        mIm_res = mIm
        nIm_res = nIm
    # Load data
    data_training_folder_name = "Synthesized_Training_1tris/"
    data_testing_folder_name = "Synthesized_Testing_1/"
    result_training_folder_name = data_training_folder_name + "Results/"
    (dataset_train_input,
     num_train_input) = load_input_data_with_albedo_and_envmaps(
         data_training_folder_name, learnable_envMap_size, input_shape, True)

    (dataset_test_input,
     num_test_input) = load_input_data_with_albedo_and_envmaps(
         data_testing_folder_name, learnable_envMap_size, input_shape, True)

    dataset_train_input = dataset_train_input.shuffle(num_train_input)
    x_train = dataset_train_input.batch(1)
    indices_plot = [0, 2, 3, 4]  #,5]
    plot_data_batches(x_train, indices_plot, 4)

    num_batches = int(num_train_input / batch_size)

    #albedoMapGT = load_rgb("Chicago_albedo.png")
    #albedoMapGT = cv2.resize(albedoMapGT, (256,256))
    #albedoMapTensor = tf.constant(albedoMapGT)
    #albedoMapTensor = tf.reshape(albedoMapTensor, [1,256,256,3])
    envDir = "EnvMaps/"
    envName = envDir + "village.hdr"
    envMap = load_rgb(envName, -1)
    (mEnv, nEnv, dEnv) = envMap.shape
    # NOTE(review): this resized envMap is never used afterwards; only its
    # original shape (read above) feeds the resize-rate branch below.
    envMap = cv2.resize(envMap, (32, 16), interpolation=cv2.INTER_LINEAR)

    # Grab one ground-truth envmap from the dataset (the loop runs once via
    # take(1)); envMapTensor is later handed to the generator constructor.
    for (batch, (inputs, labels_appearance, labels_normals, masks, gt_albedo,
                 labels_envmap)) in enumerate(dataset_train_input.take(1)):
        envMapTensor = labels_envmap
        envMapTensor = tf.reshape(envMapTensor, [1, 16, 32, 3])
        envMapTensorShow = tonemap(envMapTensor, gamma)
        plt.imshow(envMapTensorShow[0])
        plt.show()
    # Either learn a fixed-size envmap or shrink the loaded one.
    if predict_sphere_envMap[0]:
        mEnv = learnable_envMap_size[0]
        nEnv = learnable_envMap_size[1]
    else:
        envmap_resize_rate = 0.04
        mEnv = int(envmap_resize_rate * mEnv)
        nEnv = int(envmap_resize_rate * nEnv)

    print("(mEnv,nEnv) = {},{}".format(mEnv, nEnv))
    ## Calculate envMap orientations
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    # Sphere probes integrate over a disc (pi/4 of the texel count);
    # lat-long maps average over all texels.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant(float(mEnv * nEnv))

    checkpoint_write_dir = "model_saved"  #_10_samples_withEnvMap_OkResults/"
    checkpoint_load_dir = "model_10_samples_withEnvMap_OkResults/"
    checkpoint_write_prefix = checkpoint_write_dir + "adversarial"

    #autoencoder_model = SecondUVAutoencoder(input_shape, envOrientationTensor,envMapTensor, envNormalization, predict_envMap)
    generator_model = SecondGenerator(input_shape, envOrientationTensor,
                                      envMapTensor, envNormalization,
                                      predict_sphere_envMap, high_res_mode)
    #discriminator_model = FirstDiscriminator()

    generator_optimizer = tf.train.AdamOptimizer()
    #discriminator_optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate_disc)

    logdir = "./tb/"
    global_step = tf.train.get_or_create_global_step()
    summary_writer = tf.contrib.summary.create_file_writer(logdir,
                                                           flush_millis=10000)
    # Losses are scaled by this factor for console logging only.
    log_losses_ratio = 100.

    root = tf.train.Checkpoint(generator_model=generator_model)

    # Inference-only path: restore a checkpoint, show predictions, and exit.
    if (load_pre_trained_model):
        root.restore(tf.train.latest_checkpoint(checkpoint_load_dir))
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train = dataset_train_input.batch(1)
        # NOTE(review): this unpack expects 4 fields, but the dataset built
        # above yields 6 (.., gt_albedo, labels_envmap) -- likely stale code;
        # confirm against load_input_data_with_albedo_and_envmaps.
        for (batch, (inputs, labels_appearance, labels_normals,
                     masks)) in enumerate(x_train.take(4)):
            (albedos_preds, normals_preds, appearances_preds,
             envMap_preds) = generator_model(inputs)
            plt.imshow(appearances_preds[0])
        plt.show()
        return

    num_itr = 0
    training_step = -1
    # Main training loop: one pass over the (re-shuffled) data per epoch.
    while (num_itr < epochs):
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train_input = dataset_train_input.batch(batch_size)
        x_test_input = dataset_test_input.shuffle(num_test_input).batch(1)
        # zip() pairs each training batch with one test batch and stops at
        # the shorter of the two streams.
        for (batch, ((inputs, labels_appearance, labels_normals, masks, gt_albedo, labels_envmap), \
                     (inputs_test, labels_appearance_test, labels_normals_test, masks_test, gt_albedo_test, labels_envmap_test))) \
                in enumerate(zip(x_train_input,x_test_input)):

            #plt.imshow(ground_truth_images[0])
            #plt.show()
            perform_testing = False
            with summary_writer.as_default(
            ), tf.contrib.summary.always_record_summaries():
                with tf.GradientTape(
                ) as gen_tape:  #, tf.GradientTape() as disc_tape:
                    (albedos_preds, normals_preds, appearances_preds,
                     envMap_preds) = generator_model(inputs)

                    #fake_adv_output = discriminator_model(appearances_preds)
                    #real_adv_output = discriminator_model(ground_truth_images)

                    (loss_app, loss_norm,
                     loss_env) = generator_model.loss_with_envmap(
                         appearances_preds, normals_preds, albedos_preds,
                         labels_appearance, labels_normals, masks,
                         envMap_preds, labels_envmap)

                    #gen_loss_adv = discriminator_model.generator_loss(fake_adv_output)

                    # Unweighted sum of the three supervision terms
                    # (adversarial term currently disabled).
                    gen_loss = loss_app + loss_norm + loss_env  # + lambda_adv * gen_loss_adv
                    #dis_loss = discriminator_model.discriminator_loss(real_adv_output, fake_adv_output)

                gradients_of_generator = gen_tape.gradient(
                    gen_loss, generator_model.variables)
                #gradients_of_discriminator = disc_tape.gradient(dis_loss, discriminator_model.variables)

                generator_optimizer.apply_gradients(
                    zip(gradients_of_generator, generator_model.variables),
                    global_step=tf.train.get_or_create_global_step())
                # discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,
                #                                             discriminator_model.variables),
                #                                         global_step=tf.train.get_or_create_global_step())

                training_step += 1

                # Periodic evaluation on the paired test batch.
                if (training_step % test_every_n_steps == 0):
                    perform_testing = True
                    (albedos_preds_test, normals_preds_test,
                     appearances_preds_test,
                     envMap_preds_test) = generator_model(inputs_test)
                    testing_loss = generator_model.loss_testing(
                        appearances_preds_test, labels_appearance_test,
                        masks_test)

                # Periodic TensorBoard scalar/image summaries.
                if (training_step % show_every_n_steps == 0):
                    tf.contrib.summary.scalar("generator total loss", gen_loss)
                    tf.contrib.summary.scalar("generator appearance loss",
                                              loss_app)
                    tf.contrib.summary.scalar("generator envmap loss",
                                              loss_env)
                    tf.contrib.summary.scalar("generator normal loss",
                                              loss_norm)
                    # tf.contrib.summary.scalar("adversarial loss (generator)", gen_loss_adv)
                    # tf.contrib.summary.scalar("adversarial loss (discriminator)", dis_loss)
                    tf.contrib.summary.image(
                        "appearance input (label)",
                        tf.reshape(inputs[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "normal map (from 3DMM) ",
                        tf.reshape(labels_normals[0], (1, mIm, nIm, 3)))
                    # Albedo is stored in linear space; apply inverse gamma
                    # so the summary image looks right.
                    albedo_show = tf.pow(albedos_preds[0], invGamma)
                    tf.contrib.summary.image(
                        "albedo prediction",
                        tf.reshape(albedo_show, (1, mIm_res, nIm_res, 3)))

                    albedoMapTensor = tf.reshape(gt_albedo[0],
                                                 (1, mIm_res, nIm_res, 3))
                    tf.contrib.summary.image("albedo ground truth",
                                             albedoMapTensor)

                    tf.contrib.summary.image(
                        "normal map prediction",
                        tf.reshape(normals_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "appearance result",
                        tf.reshape(appearances_preds[0],
                                   (1, mIm_res, nIm_res, 3)))
                    if (predict_sphere_envMap[0]):
                        envMap_show = tonemap(envMap_preds[0], gamma)
                        tf.contrib.summary.image(
                            "envMap result",
                            tf.reshape(envMap_show, (1, mEnv, nEnv, 3)))
                        envMapTensorShow = tf.reshape(
                            tonemap(labels_envmap[0], gamma),
                            (1, mEnv, nEnv, 3))
                        tf.contrib.summary.image("envMap ground truth",
                                                 envMapTensorShow)

                    if (perform_testing):
                        tf.contrib.summary.scalar("Testing loss", testing_loss)
                        tf.contrib.summary.image(
                            "Testing appearance prediction",
                            tf.reshape(appearances_preds_test[0],
                                       (1, mIm_res, nIm_res, 3)))
                        tf.contrib.summary.image(
                            "Testing appearance label",
                            tf.reshape(labels_appearance_test[0],
                                       (1, mIm_res, nIm_res, 3)))
                        tf.contrib.summary.image(
                            "Testing normal map (from 3DMM) ",
                            tf.reshape(labels_normals_test[0],
                                       (1, mIm, nIm, 3)))
                        albedo_show = tf.pow(albedos_preds_test[0], invGamma)
                        tf.contrib.summary.image(
                            "Testing albedo prediction",
                            tf.reshape(albedo_show, (1, mIm_res, nIm_res, 3)))

                        albedoMapTensor = tf.reshape(gt_albedo_test[0],
                                                     (1, mIm_res, nIm_res, 3))
                        tf.contrib.summary.image("Testing albedo ground truth",
                                                 albedoMapTensor)

                        tf.contrib.summary.image(
                            "Testing normal map prediction",
                            tf.reshape(normals_preds_test[0],
                                       (1, mIm_res, nIm_res, 3)))

                # Console logging (losses rescaled for readability).
                if training_step % log_every_n_steps == 0:
                    # print("Iteration {}, batch: {} generator loss: {:.3f} ({:.3f}), discriminator loss: {:.3f}".
                    #       format(num_itr, batch, gen_loss.numpy(), gen_loss_adv.numpy(), dis_loss.numpy()))

                    print(
                        "Iteration {}, batch: {} Total generator loss: {:.3f} (appearance : {:.3f} - normal : {:.3f} - env: {:.3f}) "
                        .format(num_itr, batch,
                                log_losses_ratio * gen_loss.numpy(),
                                log_losses_ratio * loss_app.numpy(),
                                log_losses_ratio * loss_norm.numpy(),
                                log_losses_ratio * loss_env.numpy()))
                    if (perform_testing):
                        print("Testing loss : {:.3f} ".format(testing_loss))
                # Periodic dump of intermediate predictions to disk.
                if training_step % write_every_n_steps == 0:
                    save_tensor_cv2(
                        appearances_preds[0],
                        result_training_folder_name + "Appearance.png")
                    save_tensor_cv2(envMap_preds[0],
                                    result_training_folder_name + "EnvMap.hdr",
                                    1)
                    save_tensor_cv2(normals_preds[0],
                                    result_training_folder_name + "Normal.png")
                    save_tensor_cv2(tf.pow(albedos_preds[0], invGamma),
                                    result_training_folder_name + "Albedo.png")

        num_itr = num_itr + 1

    # Save the trained generator once after all epochs.
    root.save(checkpoint_write_prefix)
    return
コード例 #5
0
def main():
    """Train SecondUVAutoencoder on appearance data with a fixed kitchen
    light probe, logging losses and prediction images to TensorBoard.

    NOTE(review): depends on module-level globals defined elsewhere in this
    file (predict_envMap, tfe, load_data, plot_data, envMapOrientation,
    SecondUVAutoencoder, ...).
    """
    input_shape = (256, 256, 3)
    mIm = input_shape[0]
    nIm = input_shape[1]
    # Load data
    data_folder_name = "Data01/"  # "Data01/" # "Data02_isomaps/"
    ratio_train = 0.75
    max_files = 500000
    (dataset_train, num_train, dataset_test, num_test) = \
        load_data(data_folder_name, input_shape, ratio_train, max_files,False)
    plot_data(dataset_train, False)
    plot_data(dataset_train, True)

    epochs = 500
    batch_size = 10
    num_batches = int(num_train / batch_size)

    learnable_envMap_size = 16
    envDir = "EnvMaps/"
    envName = envDir + "kitchen"
    envFile = envName + "_probe.hdr"
    envMap = imageio.imread(envName + '_probe.hdr', format='HDR-FI')
    (mEnv, nEnv, dEnv) = envMap.shape
    envMapTensor = tf.constant(envMap)
    # Either learn a square envmap of fixed size or shrink the loaded probe.
    if predict_envMap:
        mEnv = learnable_envMap_size
        nEnv = learnable_envMap_size
    else:
        envmap_resize_rate = 0.04
        mEnv = int(envmap_resize_rate * mEnv)
        nEnv = int(envmap_resize_rate * nEnv)

    envMapTensor = tf.image.resize(envMapTensor, tf.constant([mEnv, nEnv]))
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    print("(mEnv,nEnv) = {},{}".format(mEnv, nEnv))
    ## Calculate envMap orientations
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv)
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    # Sphere-probe normalization: valid texels cover a disc (pi/4 of grid).
    envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)

    # Mask of valid (on-disc) texels for a learnable sphere envmap.
    envMapMask = envMapOrientation.getMaskSphereMap(learnable_envMap_size,
                                                    learnable_envMap_size)
    #plt.imshow(envMapMask)
    #plt.show()
    envMapMaskTensor = tf.constant(envMapMask)

    autoencoder_model = SecondUVAutoencoder(input_shape, envOrientationTensor,
                                            envMapTensor, envNormalization,
                                            envMapMaskTensor)
    lambda_normal_variance = 0.2

    # NOTE(review): defined but never used in the live code below.
    def loss_variance(mask, y):
        (mean, variance) = tf.nn.moments(tf.math.multiply(mask, y),
                                         axes=[0, 1, 2])
        return lambda_normal_variance * tf.norm(variance)

    optimizer = tf.train.AdamOptimizer()

    logdir = "./tb/"
    global_step = tf.train.get_or_create_global_step()
    summary_writer = tf.contrib.summary.create_file_writer(logdir,
                                                           flush_millis=10000)

    num_itr = 0
    # Training loop: L2 appearance loss, summaries every step.
    while (num_itr < epochs):
        dataset_train = dataset_train.shuffle(num_train)
        x_train = dataset_train.batch(batch_size)
        for (batch, (inputs, labels, masks)) in enumerate(x_train):
            with summary_writer.as_default(
            ), tf.contrib.summary.always_record_summaries():
                with tfe.GradientTape() as tape:
                    (albedos_preds, normals_preds,
                     appearances_preds) = autoencoder_model(inputs)
                    loss = autoencoder_model.loss_l2_appearance(
                        appearances_preds, labels, masks)

                grads = tape.gradient(loss, autoencoder_model.variables)
                optimizer.apply_gradients(
                    zip(grads, autoencoder_model.variables),
                    global_step=tf.train.get_or_create_global_step())
                tf.contrib.summary.scalar("loss", loss)
                tf.contrib.summary.image(
                    "appearance input", tf.reshape(inputs[0],
                                                   (1, mIm, nIm, 3)))
                tf.contrib.summary.image(
                    "appearance label", tf.reshape(labels[0],
                                                   (1, mIm, nIm, 3)))
                tf.contrib.summary.image(
                    "albedo prediction",
                    tf.reshape(albedos_preds[0], (1, mIm, nIm, 3)))
                tf.contrib.summary.image(
                    "normal map prediction",
                    tf.reshape(normals_preds[0], (1, mIm, nIm, 3)))
                tf.contrib.summary.image(
                    "appearance result",
                    tf.reshape(appearances_preds[0], (1, mIm, nIm, 3)))
                if batch % 2 == 0:
                    print("Iteration {}, batch: {} loss: {:.3f}".format(
                        num_itr, batch, loss.numpy()))
        num_itr = num_itr + 1

    return
    # NOTE(review): everything below is unreachable (dead code after the
    # return above) -- a disabled side-by-side visualization of inputs vs.
    # reconstructions. Remove or gate behind a flag.
    plt.figure(figsize=(20, 4))
    i = 0
    for (batch, (image, label, mask)) in enumerate(x_train):
        # Original
        subplot = plt.subplot(2, 10, i + 1)
        plt.imshow(image[0])
        subplot.get_xaxis().set_visible(False)
        subplot.get_yaxis().set_visible(False)

        # Reconstruction
        subplot = plt.subplot(2, 10, i + 11)
        pred = autoencoder_model(image)
        pred = tf.reshape(pred, image.shape)
        plt.imshow(pred[0])
        subplot.get_xaxis().set_visible(False)
        subplot.get_yaxis().set_visible(False)
        i = i + 1
        if (i >= 10): break
    plt.show()

    return
コード例 #6
0
# Script fragment: previews the prepared tensors, builds a FirstGenerator,
# and renders an appearance map with it. Depends on names defined earlier in
# the enclosing script (envMapTensor, albedoTensor, albedo_boosted_Tensor,
# normalTensor, mEnv/nEnv, mIm/nIm, d, num_replicate, input_shape,
# predict_sphere_envMap, high_res_mode).
plt.imshow(envMapTensor)
plt.show()
plt.imshow(albedoTensor)
plt.show()
plt.imshow(albedo_boosted_Tensor)
plt.show()
# plt.imshow(normalTensor)
# plt.show()

# Sphere probes integrate over a disc (pi/4 of the texel count);
# lat-long maps average over all texels.
if (predict_sphere_envMap[1]):
    envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
else:
    envNormalization = tf.constant((float)(mEnv * nEnv))

## Calculate envMap orientations
envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                 predict_sphere_envMap[1])
envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])

autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                   envMapTensor, envNormalization,
                                   predict_sphere_envMap, high_res_mode)

# Leading batch dimension of 1 for the generator.
normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])

# resTensor = autoencoder_model.render(normalTensor,albedoTensor)
resTensor = autoencoder_model.render_with_predicted_envMap(
    normalTensor, albedoTensor,
    tf.reshape(envMapTensor, [num_replicate, mEnv * nEnv, 3]))
コード例 #7
0
def main():
    """Render an albedo/normal-map pair under an HDR environment map.

    Loads a UV albedo map and normal map from disk, loads and downsamples an
    HDR environment map, builds a ``FirstUVAutoencoder`` with the lighting
    tensors baked in, and displays the rendered result.

    Depends on module-level state: ``sphereProbeMode`` (True = light-probe
    sphere map, False = equirectangular map -- TODO confirm) and the helper
    modules ``normalMapProcessing`` / ``envMapOrientation``.
    """
    plt.close("all")
    tf.enable_eager_execution()

    # "_probe" HDR files are sphere-probe parameterized; plain ones are not.
    envDir = "EnvMaps/"
    envName = envDir + "uffizi"
    if sphereProbeMode:
        envFile = envName + "_probe.hdr"
    else:
        envFile = envName + ".hdr"
    albedoMap = imageio.imread(
        "venv/Albedo.png")  #"Data02_isomaps/AppearanceMap_test.png")
    normalMap = imageio.imread(
        "venv/Normal.png")  #"Data02_isomaps/NormalMap_test.png")
    envMap = imageio.imread(envFile, format='HDR-FI')

    resize_rate = 0.05
    (mIm, nIm, dIm) = albedoMap.shape
    (mEnv, nEnv, dEnv) = envMap.shape
    input_shape = albedoMap.shape
    d = 3

    # Float tensors; keep only RGB channels and rescale albedo to [0, 1].
    normalProcessed = normalMapProcessing.processNormalMap(normalMap, mIm, nIm)
    normalizingValue = tf.constant(255.)
    albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
    normalTensor = tf.constant(normalProcessed[:, :, :3], dtype=tf.float32)
    envMapTensor = tf.constant(envMap)
    albedoTensor = tf.scalar_mul(1. / normalizingValue, albedoTensor[:, :, :3])

    # Downsample the environment map so the lighting matmul stays tractable.
    mEnv = int(resize_rate * mEnv)
    nEnv = int(resize_rate * nEnv)
    envMapTensor = tf.image.resize(envMapTensor, tf.constant([mEnv, nEnv]))
    plt.imshow(envMapTensor)
    plt.show()
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    print((mEnv, nEnv))

    # Sphere probes only cover a disc, i.e. pi/4 of the image area, so the
    # normalization differs from a full rectangular map.
    if sphereProbeMode:
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant(float(mEnv * nEnv))

    ## Calculate envMap orientations: one unit light direction per texel,
    ## flattened to (3, mEnv*nEnv) for the cosine matmul inside the model.
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     sphereProbeMode)
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])

    autoencoder_model = FirstUVAutoencoder(input_shape, envOrientationTensor,
                                           envMapTensor, envNormalization,
                                           (False, False), False)

    # Add a leading batch dimension of 1 for the model.
    normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
    albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])

    resTensor = autoencoder_model.render(normalTensor, albedoTensor)
    #resTensor = autoencoder_model.render_with_predicted_envMap(normalTensor, albedoTensor, tf.reshape(envMapTensor, [1, mEnv * nEnv, 3]))

    plt.imshow(resTensor[0])
    plt.show()
# ---- Code example #8 (コード例 #8) ----
def main():
    """Adversarially train a texture generator against a discriminator.

    Loads input isomaps (with normals) and real high-res textures, builds a
    ``FirstGenerator``/``FirstDiscriminator`` pair with environment-map
    lighting tensors baked in, then runs a GAN training loop with TensorBoard
    summaries and checkpointing.

    Reads module-level configuration defined elsewhere in the file:
    ``high_res_mode``, ``predict_sphere_envMap`` (pair of flags; [0] appears
    to mean "learn the envMap", [1] "sphere-probe layout" -- TODO confirm),
    ``learnable_envMap_size``, ``load_pre_trained_model``,
    ``learning_rate_disc``, ``lambda_adv``, ``show_every_n_steps``,
    ``log_every_n_steps``, plus the ``load_*`` data helpers.
    """
    # Generator input resolution; adversarial ground truth may be higher-res.
    input_shape = (256, 256, 3)
    shape_gt_adv = (512, 512, 3)
    mIm = input_shape[0]
    nIm = input_shape[1]
    if (high_res_mode):
        # Generator output matches the high-res ground-truth size.
        mIm_res = shape_gt_adv[0]
        nIm_res = shape_gt_adv[1]
    else:
        mIm_res = mIm
        nIm_res = nIm
    # Load data
    data_input_folder_name = "Data04_isomaps_normals/"
    data_ground_truth_folder_name = "TexturesChicago/"
    # NOTE(review): ratio_train and max_files are never used in this
    # function -- confirm whether they belong to the data loaders.
    ratio_train = 0.75
    max_files = 5000
    (dataset_adv_real,
     num_adv_real) = load_ground_truth_data(data_ground_truth_folder_name,
                                            resize_images=True,
                                            image_shape=shape_gt_adv)
    # Inputs are replicated to match the number of real adversarial samples.
    (dataset_train_input,
     num_train_input) = load_input_data_with_normals_and_replicate(
         data_input_folder_name, input_shape, True, num_adv_real)

    # Sanity-check a few training samples visually.
    plot_data_bis(dataset_train_input, 0)
    plot_data_bis(dataset_train_input, 3)
    plot_data_bis(dataset_train_input, 2)

    epochs = 50
    batch_size = 5
    num_batches = int(num_train_input / batch_size)

    # Environment map: either a fixed downsampled HDR map or a learnable one.
    # NOTE(review): envFile is assigned but unused here; load_rgb takes
    # envName instead -- confirm which path the loader actually reads.
    envDir = "EnvMaps/"
    envName = envDir + "village"
    envFile = envName + "_probe.hdr"
    envMap = load_rgb(envName, -1)
    (mEnv, nEnv, dEnv) = envMap.shape
    envMapTensor = tf.constant(envMap)
    if predict_sphere_envMap[0]:
        # The envMap is predicted by the generator at a fixed learnable size.
        mEnv = learnable_envMap_size[0]
        nEnv = learnable_envMap_size[1]
    else:
        envmap_resize_rate = 0.04
        mEnv = int(envmap_resize_rate * mEnv)
        nEnv = int(envmap_resize_rate * nEnv)

    envMapTensor = tf.image.resize(envMapTensor, tf.constant([mEnv, nEnv]))
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    print("(mEnv,nEnv) = {},{}".format(mEnv, nEnv))
    ## Calculate envMap orientations: one light direction per texel,
    ## flattened to (3, mEnv*nEnv) for the cosine matmul inside the model.
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    # Sphere probes only cover a disc (pi/4 of the image area), which changes
    # the radiance normalization constant.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant(float(mEnv * nEnv))

    # Mask of valid (in-disc) texels, used when displaying predicted envMaps.
    envMapMask = envMapOrientation.getMaskSphereMap(mEnv, nEnv)
    #plt.imshow(envMapMask)
    #plt.show()
    envMapMaskTensor = tf.constant(envMapMask)

    # NOTE(review): checkpoint_write_dir has no trailing slash, so the save
    # prefix becomes "model_savedadversarial" -- confirm this is intended.
    checkpoint_write_dir = "model_saved"  #_10_samples_withEnvMap_OkResults/"
    checkpoint_load_dir = "model_10_samples_withEnvMap_OkResults/"
    checkpoint_write_prefix = checkpoint_write_dir + "adversarial"

    #autoencoder_model = SecondUVAutoencoder(input_shape, envOrientationTensor,envMapTensor, envNormalization, predict_envMap)
    generator_model = FirstGenerator(input_shape, envOrientationTensor,
                                     envMapTensor, envNormalization,
                                     predict_sphere_envMap, high_res_mode)
    discriminator_model = FirstDiscriminator()

    # Separate optimizers; only the discriminator's learning rate is tuned.
    generator_optimizer = tf.train.AdamOptimizer()
    discriminator_optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate_disc)

    logdir = "./tb/"
    global_step = tf.train.get_or_create_global_step()
    summary_writer = tf.contrib.summary.create_file_writer(logdir,
                                                           flush_millis=10000)

    # Checkpoint bundles both models and optimizers so training can resume.
    root = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator_model=generator_model,
        discriminator_model=discriminator_model,
        optimizer_step=tf.train.get_or_create_global_step())

    if (load_pre_trained_model):
        # Inference-only path: restore, show a few predictions, and exit.
        root.restore(tf.train.latest_checkpoint(checkpoint_load_dir))
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train = dataset_train_input.batch(1)
        for (batch, (inputs, labels_appearance, labels_normals,
                     masks)) in enumerate(x_train.take(4)):
            (albedos_preds, normals_preds, appearances_preds,
             envMap_preds) = generator_model(inputs)
            plt.imshow(appearances_preds[0])
        plt.show()
        return

    num_itr = 0
    while (num_itr < epochs):
        # Reshuffle and rebatch both streams each epoch; they are zipped so
        # each step sees one fake (generated) and one real batch.
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train_input = dataset_train_input.batch(batch_size)
        dataset_adv_real = dataset_adv_real.shuffle(num_adv_real)
        x_train_adv_real = dataset_adv_real.batch(batch_size)
        for (batch, ((inputs, labels_appearance, labels_normals, masks), ground_truth_images)) \
                in enumerate(zip(x_train_input,x_train_adv_real)):
            #plt.imshow(ground_truth_images[0])
            #plt.show()
            with summary_writer.as_default(
            ), tf.contrib.summary.always_record_summaries():
                # Two tapes: one per model, so each gradient call only
                # tracks its own variables.
                with tf.GradientTape() as gen_tape, tf.GradientTape(
                ) as disc_tape:
                    (albedos_preds, normals_preds, appearances_preds,
                     envMap_preds) = generator_model(inputs)

                    fake_adv_output = discriminator_model(appearances_preds)
                    real_adv_output = discriminator_model(ground_truth_images)

                    # Generator loss = reconstruction (L2) + weighted
                    # adversarial term.
                    gen_loss_l2 = generator_model.loss(appearances_preds,
                                                       normals_preds,
                                                       albedos_preds,
                                                       labels_appearance,
                                                       labels_normals, masks)
                    gen_loss_adv = discriminator_model.generator_loss(
                        fake_adv_output)

                    gen_loss = gen_loss_l2 + lambda_adv * gen_loss_adv
                    dis_loss = discriminator_model.discriminator_loss(
                        real_adv_output, fake_adv_output)

                gradients_of_generator = gen_tape.gradient(
                    gen_loss, generator_model.variables)
                gradients_of_discriminator = disc_tape.gradient(
                    dis_loss, discriminator_model.variables)

                # Only the discriminator update advances the global step
                # (avoids double-counting one training step).
                generator_optimizer.apply_gradients(
                    zip(gradients_of_generator, generator_model.variables))
                discriminator_optimizer.apply_gradients(
                    zip(gradients_of_discriminator,
                        discriminator_model.variables),
                    global_step=tf.train.get_or_create_global_step())

                # Periodic TensorBoard scalars and images.
                if (batch % show_every_n_steps == 0):
                    tf.contrib.summary.scalar("generator total loss", gen_loss)
                    tf.contrib.summary.scalar("adversarial loss (generator)",
                                              gen_loss_adv)
                    tf.contrib.summary.scalar(
                        "adversarial loss (discriminator)", dis_loss)
                    tf.contrib.summary.image(
                        "appearance input",
                        tf.reshape(inputs[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "normal map (from 3DMM) ",
                        tf.reshape(labels_normals[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "albedo prediction",
                        tf.reshape(albedos_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "normal map prediction",
                        tf.reshape(normals_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "appearance result",
                        tf.reshape(appearances_preds[0],
                                   (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "ground truth high res texture",
                        tf.reshape(ground_truth_images[0],
                                   (1, mIm_res, nIm_res, 3)))
                    if (predict_sphere_envMap[0]):
                        if (predict_sphere_envMap[1]):
                            # Mask out texels outside the probe disc.
                            envMap_show = tf.multiply(envMapMask,
                                                      envMap_preds[0])
                        else:
                            envMap_show = envMap_preds[0]
                        tf.contrib.summary.image(
                            "envMap result",
                            tf.reshape(envMap_show, (1, mEnv, nEnv, 3)))
                if batch % log_every_n_steps == 0:
                    print(
                        "Iteration {}, batch: {} generator loss: {:.3f} ({:.3f}), discriminator loss: {:.3f}"
                        .format(num_itr, batch, gen_loss.numpy(),
                                gen_loss_adv.numpy(), dis_loss.numpy()))
        num_itr = num_itr + 1

    root.save(checkpoint_write_prefix)
    return