def load_and_preprocess_image(path):
    """Load an RGB image, resize it to the module-level ``image_shape``,
    scale pixel values to [0, 1] and return it as a float32 tensor.

    Args:
        path: Path to an image file readable by ``load_rgb``.

    Returns:
        ``tf.Tensor`` of dtype float32 with values in [0, 1], of size
        ``image_shape`` (module-level (height, width[, depth]) target —
        TODO confirm its definition, it is not visible in this file).
    """
    image = load_rgb(path)
    # Resize only when the loaded image does not already match the target.
    # FIX: the original compared height to image_shape[1] and width to
    # image_shape[0] (cross-dimension) joined with `and`, which skipped the
    # resize whenever either cross-pair happened to coincide; compare each
    # dimension to its own target and resize if either differs.
    needs_resize = (image.shape[0] != image_shape[0]
                    or image.shape[1] != image_shape[1])
    if needs_resize:
        # cv2.resize expects the target size as (width, height).
        image = cv2.resize(image, (image_shape[1], image_shape[0]))
    image = image.astype(float) / 255.0  # normalize to [0,1] range
    return tf.constant(image, dtype=tf.float32)
def load_envMaps(data_folder_name, num_images, input_shape):
    """Load up to ``num_images`` HDR environment maps from a folder.

    Every ``*.hdr`` file found in ``data_folder_name`` is read with
    ``load_rgb(path, -1)`` (HDR flag), resized to
    ``(input_shape[0], input_shape[1])`` and stacked into one array.

    Args:
        data_folder_name: Folder containing the ``.hdr`` files.
        num_images: First-axis size of the returned array; at most this
            many files are loaded.
        input_shape: (height, width, depth) of each loaded map.

    Returns:
        ``np.ndarray`` of shape (num_images, height, width, depth); rows
        beyond the number of files found remain zero.
    """
    data_root = pathlib.Path(data_folder_name)
    all_image_paths = [str(path) for path in data_root.glob('*.hdr')]
    res = np.zeros(
        [num_images, input_shape[0], input_shape[1], input_shape[2]])
    # FIX: bound the loop at num_images — the original wrote one row per
    # file with an unbounded manual counter, so a folder with more than
    # num_images files raised IndexError on `res`.
    for counter, path in enumerate(all_image_paths[:num_images]):
        envMap = load_rgb(path, -1)
        # cv2.resize expects the target size as (width, height).
        envMap = cv2.resize(envMap, (input_shape[1], input_shape[0]),
                            interpolation=cv2.INTER_LINEAR)
        res[counter, :, :, :] = envMap[:, :, :]
    return res
def main():
    """Batch-synthesize appearance UV maps for each scene/illumination code.

    For every code in ``codes`` this loads the matching albedo, normal map
    and environment map from ``Data_synthesized/``, renders the lit
    appearance with a ``FirstGenerator`` model, and writes the result (with
    the albedo's alpha channel re-attached) next to the normal maps.
    Relies on module-level ``predict_sphere_envMap``, ``high_res_mode``
    and the imported helpers — none are defined in this block.
    """
    plt.close("all")
    tf.enable_eager_execution()  # TF 1.x eager mode for the whole script
    # Scene/illumination identifiers: S<scene>_I<illumination>.
    codes = [
        "S1_I1", "S1_I2", "S1_I3", "S2_I1", "S2_I2", "S2_I3", "S3_I1",
        "S3_I2", "S3_I3", "S4_I1", "S4_I2", "S4_I3"
    ]
    # code = "S1_I1"
    for code in codes:
        data_folder = "Data_synthesized/"
        envDir = data_folder + "EnvMap/"
        envName = envDir + code + "_Illum"
        # predict_sphere_envMap[1] presumably selects the light-probe
        # (sphere) HDR variant over the lat-long one — TODO confirm.
        if (predict_sphere_envMap[1]):
            envFile = envName + "_probe.hdr"
        else:
            envFile = envName + ".hdr"
        albedo_folder = data_folder + "Albedo/"
        normal_folder = data_folder + "AppearanceNormal/"
        albedo_file = albedo_folder + code + ".png"
        normal_file = normal_folder + code + "_Normal_UV.png"
        albedoMap = load_rgba(
            albedo_file)  # "Data02_isomaps/AppearanceMap_test.png")
        normalMap = load_rgb(
            normal_file)  # "Data02_isomaps/NormalMap_test.png")
        data_input_folder_name = "Data_to_synthesize"
        input_shape = (256, 256, 3)
        num_replicate = 1
        (dataset_train_input,
         num_train_input) = load_input_data_with_normals_and_replicate(
             data_input_folder_name, input_shape, True, num_replicate)
        envMap = load_rgb(envFile, -1)  # -1 flag: load unclipped HDR data
        print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))
        (mEnv, nEnv, dEnv) = envMap.shape
        print((mEnv, nEnv))
        plt.imshow(envMap)
        plt.show()
        # cv2.resize takes (width, height): the env map becomes 32x64.
        albedoMap = cv2.resize(albedoMap, (256, 256))
        normalMap = cv2.resize(normalMap, (256, 256))
        envMap = cv2.resize(envMap, (64, 32), interpolation=cv2.INTER_LINEAR)
        print("After resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))
        (mIm, nIm, dIm) = albedoMap.shape
        (mEnv, nEnv, dEnv) = envMap.shape
        input_shape = albedoMap.shape
        d = 3  # RGB channels used for rendering (alpha handled separately)
        plt.imshow(envMap)
        plt.show()
        plt.imshow(albedoMap)
        plt.show()
        plt.imshow(normalMap)
        plt.show()
        # cv2.namedWindow('albedo', cv2.WINDOW_NORMAL)
        # cv2.imshow('albedo', albedoMap)
        # cv2.namedWindow('normal', cv2.WINDOW_NORMAL)
        # cv2.imshow('normal', normalMap)
        # cv2.namedWindow('envMap', cv2.WINDOW_NORMAL)
        # cv2.imshow('envMap', envMap)
        # cv2.waitKey(0)
        gamma = tf.constant(2.2)       # display gamma
        invGamma = tf.constant(1. / 2.2)
        normalizingValue = tf.constant(255.)  # 8-bit -> [0,1] scale
        albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
        normalTensor = tf.constant(normalMap[:, :, :3], dtype=tf.float32)
        envMapTensor = tf.constant(envMap, dtype=tf.float32)
        albedoTensor = tf.scalar_mul(1. / normalizingValue,
                                     albedoTensor[:, :, :3])
        normalTensor = tf.scalar_mul(1. / normalizingValue,
                                     normalTensor[:, :, :3])
        # albedoTensor = tf.pow(albedoTensor,gamma)
        # Normalization constant for integrating over the env map; the
        # pi/4 factor presumably accounts for the sphere-probe layout —
        # TODO confirm against the renderer.
        if (predict_sphere_envMap[1]):
            envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
        else:
            envNormalization = tf.constant((float)(mEnv * nEnv))
        ## Calculate envMap orientations (one unit direction per texel).
        envVectors = envMapOrientation.envMapOrientation(
            mEnv, nEnv, predict_sphere_envMap[1])
        envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
        envOrientationTensor = tf.reshape(envOrientationTensor,
                                          [3, mEnv * nEnv])
        envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
        autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                           envMapTensor, envNormalization,
                                           predict_sphere_envMap,
                                           high_res_mode)
        # Add a leading batch dimension of 1 for the renderer.
        normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
        albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])
        # resTensor = autoencoder_model.render(normalTensor,albedoTensor)
        resTensor = autoencoder_model.render_with_predicted_envMap(
            normalTensor, albedoTensor,
            tf.reshape(envMapTensor, [num_replicate, mEnv * nEnv, 3]))
        # Gamma-encode the linear render for display/saving.
        resTensorGamma = tf.pow(resTensor, invGamma)
        res_save = 255. * np.array(resTensorGamma[0])
        res_save = convert_rgb_to_cv2(res_save)  # RGB -> BGR for cv2.imwrite
        # Re-attach the source albedo's alpha channel as a 4th channel.
        res_save_temp = np.zeros([res_save.shape[0], res_save.shape[1], 4])
        res_save_temp[:, :, :3] = res_save[:, :, :]
        res_save_temp[:, :, 3] = albedoMap[:, :, 3]
        res_save = res_save_temp.astype(int)
        plt.imshow(resTensor[0])
        plt.show()
        plt.imshow(resTensorGamma[0])
        plt.show()
        cv2.imwrite(normal_folder + code + "_Appearance_UV.png", res_save)
        # x_train_input = dataset_train_input.batch(num_replicate)
        # for (batch, (inputs, labels_appearance, labels_normals, masks)) \
        #         in enumerate(x_train_input):
        #     resTensor = autoencoder_model.render_with_predicted_envMap(labels_normals, inputs,
        #                                     tf.reshape(envMapsTensors,
        #                                                [num_replicate, mEnv * nEnv, 3]))
        #     resTensorGamma = tf.pow(resTensor, invGamma)
        #     res_save = 255. * np.array(resTensorGamma[0])
        #     res_save = convert_rgb_to_cv2(res_save)
        #     res_save_temp = np.zeros([res_save.shape[0], res_save.shape[1], 4])
        #     res_save_temp[:, :, :3] = res_save[:, :, :]
        #     res_save_temp[:, :, 3] = albedoMap[:, :, 3]
        #     res_save = res_save_temp.astype(int)
        #
        #     plt.imshow(envMapsTensors[0])
        #     plt.show()
        #     plt.imshow(resTensorGamma[0])
        #     plt.show()
        #     plt.imshow(envMapsTensors[1])
        #     plt.show()
        #     plt.imshow(resTensorGamma[1])
        #     plt.show()
        #
        #     return
    return
# Small sanity-check script: verifies that an image survives a
# load -> convert -> imwrite -> reload round-trip through load_rgb /
# convert_rgb_to_cv2 (i.e. the RGB<->BGR conversion is self-inverse).
from load_rgb_cv import load_rgb, convert_rgb_to_cv2
import cv2
import matplotlib.pyplot as plt

image_file = "Chicago_albedo.png"
output_file = "Chicago_albedo_test.png"
image1 = load_rgb(image_file)
plt.imshow(image1)
plt.show()
# Probe one green-channel pixel to compare before/after the round-trip.
print("image1 : {}".format(image1[150, 150, 1]))
cv2.imwrite(output_file, convert_rgb_to_cv2(image1))
image2 = load_rgb(output_file)
plt.imshow(image2)
plt.show()
print("image2 : {}".format(image2[150, 150, 1]))
def main():
    """Synthesize one appearance UV map for a single hard-coded sample.

    Loads the Chicago albedo/normal maps and a studio environment map,
    renders the lit appearance with ``FirstGenerator.render`` and writes
    the result (plus copies of the inputs) into ``Data_synthesized/``
    under the ``S4_I3_`` code. Depends on module-level
    ``predict_sphere_envMap`` and ``high_res_mode`` — not defined here.
    """
    plt.close("all")
    tf.enable_eager_execution()  # TF 1.x eager mode
    folder_write_name = "Data_synthesized/"
    extension_nor_app = "AppearanceNormal/"
    extension_env = "EnvMap/"
    code_sample = "S4_I3_"
    envDir = "EnvMaps/"
    envName = envDir + "STUDIOATM_13SN"
    # predict_sphere_envMap[1] presumably selects the sphere light-probe
    # HDR layout — TODO confirm.
    if (predict_sphere_envMap[1]):
        envFile = envName + "_probe.hdr"
    else:
        #envFile = folder_write_name + extension_env + code_sample + "Illum.hdr"
        envFile = envName + ".hdr"
    albedoMap = load_rgb(
        "Chicago_albedo.png")  #"Data02_isomaps/AppearanceMap_test.png")
    normalMap = load_rgb(
        "Chicago_normal.png")  #"Data02_isomaps/NormalMap_test.png")
    envMap = load_rgb(envFile, -1)  # -1 flag: load unclipped HDR data
    print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
        np.amax(envMap), np.mean(envMap)))
    # Archive the inputs next to the synthesized output.
    shutil.copy(
        "Chicago_normal.png",
        folder_write_name + extension_nor_app + code_sample + "Normal_UV.png")
    shutil.copy(envFile,
                folder_write_name + extension_env + code_sample + "Illum.hdr")
    (mEnv, nEnv, dEnv) = envMap.shape
    print((mEnv, nEnv))
    plt.imshow(envMap)
    plt.show()
    # cv2.resize takes (width, height); env map becomes 48x96.
    albedoMap = cv2.resize(albedoMap, (256, 256))
    normalMap = cv2.resize(normalMap, (256, 256))
    envMap = cv2.resize(envMap, (3 * 32, 3 * 16),
                        interpolation=cv2.INTER_LINEAR)
    print("After resize : max: {:.3f}, mean: ({:.3f})".format(
        np.amax(envMap), np.mean(envMap)))
    (mIm, nIm, dIm) = albedoMap.shape
    (mEnv, nEnv, dEnv) = envMap.shape
    input_shape = albedoMap.shape
    d = 3  # RGB channels used by the renderer
    plt.imshow(envMap)
    plt.show()
    plt.imshow(albedoMap)
    plt.show()
    plt.imshow(normalMap)
    plt.show()
    # cv2.namedWindow('albedo', cv2.WINDOW_NORMAL)
    # cv2.imshow('albedo', albedoMap)
    # cv2.namedWindow('normal', cv2.WINDOW_NORMAL)
    # cv2.imshow('normal', normalMap)
    # cv2.namedWindow('envMap', cv2.WINDOW_NORMAL)
    # cv2.imshow('envMap', envMap)
    # cv2.waitKey(0)
    gamma = tf.constant(2.2)        # display gamma
    invGamma = tf.constant(1. / 2.2)
    normalizingValue = tf.constant(255.)  # 8-bit -> [0,1] scale
    albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
    normalTensor = tf.constant(normalMap[:, :, :3], dtype=tf.float32)
    envMapTensor = tf.constant(envMap, dtype=tf.float32)
    albedoTensor = tf.scalar_mul(1. / normalizingValue,
                                 albedoTensor[:, :, :3])
    normalTensor = tf.scalar_mul(1. / normalizingValue,
                                 normalTensor[:, :, :3])
    #albedoTensor = tf.pow(albedoTensor,gamma)
    # Env-map integration normalization; pi/4 factor presumably for the
    # sphere-probe layout — TODO confirm against the renderer.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant((float)(mEnv * nEnv))
    ## Calculate envMap orientations (one unit direction per texel).
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                       envMapTensor, envNormalization,
                                       predict_sphere_envMap, high_res_mode)
    # Add a leading batch dimension of 1 for the renderer.
    normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
    albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])
    resTensor = autoencoder_model.render(normalTensor, albedoTensor)
    # Gamma-encode the linear render for display/saving.
    resTensorGamma = tf.pow(resTensor, invGamma)
    res_save = 255. * np.array(resTensorGamma[0])
    res_save = convert_rgb_to_cv2(res_save)  # RGB -> BGR for cv2.imwrite
    res_save = res_save.astype(int)
    plt.imshow(resTensor[0])
    plt.show()
    plt.imshow(resTensorGamma[0])
    plt.show()
    #cv2.imwrite(folder_name+ "envMap_resized.hdr" , envMap)
    cv2.imwrite(
        folder_write_name + extension_nor_app + code_sample +
        "Appearance_UV.png", res_save)
    #cv2.imwrite(folder_name+ "normalMap_resized.png" , normalMap)
    return
def main():
    """Train the ``SecondGenerator`` on synthesized data with env-map labels.

    Loads training/testing datasets (appearance, normals, masks, ground
    truth albedo and environment maps), builds the generator and its
    renderer inputs, then runs an eager TF 1.x training loop with periodic
    TensorBoard summaries, console logging, result images and checkpoints.
    The adversarial (discriminator) path is present but commented out.
    Depends on many module-level globals (``high_res_mode``, ``epochs``,
    ``batch_size``, ``predict_sphere_envMap``, ``learnable_envMap_size``,
    ``gamma``, ``invGamma``, the ``*_every_n_steps`` constants,
    ``load_pre_trained_model``) — none are defined in this block.

    NOTE(review): indentation was lost in this file; the scope of the
    ``load_pre_trained_model`` preview block (restore + preview + early
    return) was reconstructed as being inside the ``if`` — confirm
    against version history.
    """
    input_shape = (256, 256, 3)
    shape_gt_adv = (512, 512, 3)
    mIm = input_shape[0]
    nIm = input_shape[1]
    # Output resolution: high-res mode upsamples predictions to 512x512.
    if (high_res_mode):
        mIm_res = shape_gt_adv[0]
        nIm_res = shape_gt_adv[1]
    else:
        mIm_res = mIm
        nIm_res = nIm
    # Load data
    data_training_folder_name = "Synthesized_Training_1tris/"
    data_testing_folder_name = "Synthesized_Testing_1/"
    result_training_folder_name = data_training_folder_name + "Results/"
    (dataset_train_input,
     num_train_input) = load_input_data_with_albedo_and_envmaps(
         data_training_folder_name, learnable_envMap_size, input_shape, True)
    (dataset_test_input,
     num_test_input) = load_input_data_with_albedo_and_envmaps(
         data_testing_folder_name, learnable_envMap_size, input_shape, True)
    dataset_train_input = dataset_train_input.shuffle(num_train_input)
    x_train = dataset_train_input.batch(1)
    indices_plot = [0, 2, 3, 4]  #,5]
    plot_data_batches(x_train, indices_plot, 4)
    num_batches = int(num_train_input / batch_size)
    #albedoMapGT = load_rgb("Chicago_albedo.png")
    #albedoMapGT = cv2.resize(albedoMapGT, (256,256))
    #albedoMapTensor = tf.constant(albedoMapGT)
    #albedoMapTensor = tf.reshape(albedoMapTensor, [1,256,256,3])
    envDir = "EnvMaps/"
    envName = envDir + "village.hdr"
    envMap = load_rgb(envName, -1)  # -1 flag: load unclipped HDR data
    (mEnv, nEnv, dEnv) = envMap.shape
    # cv2.resize takes (width, height): env map becomes 16x32.
    envMap = cv2.resize(envMap, (32, 16), interpolation=cv2.INTER_LINEAR)
    # Preview the env-map label of one training sample.
    for (batch, (inputs, labels_appearance, labels_normals, masks, gt_albedo,
                 labels_envmap)) in enumerate(dataset_train_input.take(1)):
        envMapTensor = labels_envmap
        envMapTensor = tf.reshape(envMapTensor, [1, 16, 32, 3])
        envMapTensorShow = tonemap(envMapTensor, gamma)
        plt.imshow(envMapTensorShow[0])
        plt.show()
    # Final env-map resolution: either the learnable size, or a fixed
    # fraction (0.04) of the loaded map's resolution.
    if predict_sphere_envMap[0]:
        mEnv = learnable_envMap_size[0]
        nEnv = learnable_envMap_size[1]
    else:
        envmap_resize_rate = 0.04
        mEnv = int(envmap_resize_rate * mEnv)
        nEnv = int(envmap_resize_rate * nEnv)
    print("(mEnv,nEnv) = {},{}".format(mEnv, nEnv))
    ## Calculate envMap orientations (one unit direction per texel).
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    # Integration normalization; pi/4 presumably for the sphere-probe
    # layout — TODO confirm against the renderer.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant(float(mEnv * nEnv))
    checkpoint_write_dir = "model_saved"  #_10_samples_withEnvMap_OkResults/"
    checkpoint_load_dir = "model_10_samples_withEnvMap_OkResults/"
    checkpoint_write_prefix = checkpoint_write_dir + "adversarial"
    #autoencoder_model = SecondUVAutoencoder(input_shape, envOrientationTensor,envMapTensor, envNormalization, predict_envMap)
    generator_model = SecondGenerator(input_shape, envOrientationTensor,
                                      envMapTensor, envNormalization,
                                      predict_sphere_envMap, high_res_mode)
    #discriminator_model = FirstDiscriminator()
    generator_optimizer = tf.train.AdamOptimizer()
    #discriminator_optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate_disc)
    logdir = "./tb/"
    global_step = tf.train.get_or_create_global_step()
    summary_writer = tf.contrib.summary.create_file_writer(logdir,
                                                           flush_millis=10000)
    log_losses_ratio = 100.  # scale factor applied only to printed losses
    root = tf.train.Checkpoint(generator_model=generator_model)
    if (load_pre_trained_model):
        root.restore(tf.train.latest_checkpoint(checkpoint_load_dir))
        # Preview a few predictions from the restored model, then stop.
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train = dataset_train_input.batch(1)
        for (batch, (inputs, labels_appearance, labels_normals,
                     masks)) in enumerate(x_train.take(4)):
            (albedos_preds, normals_preds, appearances_preds,
             envMap_preds) = generator_model(inputs)
            plt.imshow(appearances_preds[0])
            plt.show()
        return
    num_itr = 0
    training_step = -1
    while (num_itr < epochs):
        # Reshuffle and rebatch once per epoch; test set is zipped with the
        # training set so one test sample is available every batch.
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train_input = dataset_train_input.batch(batch_size)
        x_test_input = dataset_test_input.shuffle(num_test_input).batch(1)
        for (batch, ((inputs, labels_appearance, labels_normals, masks,
                      gt_albedo, labels_envmap),
                     (inputs_test, labels_appearance_test, labels_normals_test,
                      masks_test, gt_albedo_test, labels_envmap_test))) \
                in enumerate(zip(x_train_input, x_test_input)):
            #plt.imshow(ground_truth_images[0])
            #plt.show()
            perform_testing = False
            with summary_writer.as_default(
            ), tf.contrib.summary.always_record_summaries():
                with tf.GradientTape(
                ) as gen_tape:  #, tf.GradientTape() as disc_tape:
                    (albedos_preds, normals_preds, appearances_preds,
                     envMap_preds) = generator_model(inputs)
                    #fake_adv_output = discriminator_model(appearances_preds)
                    #real_adv_output = discriminator_model(ground_truth_images)
                    (loss_app, loss_norm,
                     loss_env) = generator_model.loss_with_envmap(
                         appearances_preds, normals_preds, albedos_preds,
                         labels_appearance, labels_normals, masks,
                         envMap_preds, labels_envmap)
                    #gen_loss_adv = discriminator_model.generator_loss(fake_adv_output)
                    gen_loss = loss_app + loss_norm + loss_env  # + lambda_adv * gen_loss_adv
                    #dis_loss = discriminator_model.discriminator_loss(real_adv_output, fake_adv_output)
                gradients_of_generator = gen_tape.gradient(
                    gen_loss, generator_model.variables)
                #gradients_of_discriminator = disc_tape.gradient(dis_loss, discriminator_model.variables)
                generator_optimizer.apply_gradients(
                    zip(gradients_of_generator, generator_model.variables),
                    global_step=tf.train.get_or_create_global_step())
                # discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,
                #                                             discriminator_model.variables),
                #                                         global_step=tf.train.get_or_create_global_step())
                training_step += 1
                # Periodic evaluation on the zipped test sample.
                if (training_step % test_every_n_steps == 0):
                    perform_testing = True
                    (albedos_preds_test, normals_preds_test,
                     appearances_preds_test,
                     envMap_preds_test) = generator_model(inputs_test)
                    testing_loss = generator_model.loss_testing(
                        appearances_preds_test, labels_appearance_test,
                        masks_test)
                # Periodic TensorBoard summaries.
                if (training_step % show_every_n_steps == 0):
                    tf.contrib.summary.scalar("generator total loss", gen_loss)
                    tf.contrib.summary.scalar("generator appearance loss",
                                              loss_app)
                    tf.contrib.summary.scalar("generator envmap loss",
                                              loss_env)
                    tf.contrib.summary.scalar("generator normal loss",
                                              loss_norm)
                    # tf.contrib.summary.scalar("adversarial loss (generator)", gen_loss_adv)
                    # tf.contrib.summary.scalar("adversarial loss (discriminator)", dis_loss)
                    tf.contrib.summary.image(
                        "appearance input (label)",
                        tf.reshape(inputs[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "normal map (from 3DMM) ",
                        tf.reshape(labels_normals[0], (1, mIm, nIm, 3)))
                    # Albedo is gamma-encoded for display.
                    albedo_show = tf.pow(albedos_preds[0], invGamma)
                    tf.contrib.summary.image(
                        "albedo prediction",
                        tf.reshape(albedo_show, (1, mIm_res, nIm_res, 3)))
                    albedoMapTensor = tf.reshape(gt_albedo[0],
                                                 (1, mIm_res, nIm_res, 3))
                    tf.contrib.summary.image("albedo ground truth",
                                             albedoMapTensor)
                    tf.contrib.summary.image(
                        "normal map prediction",
                        tf.reshape(normals_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "appearance result",
                        tf.reshape(appearances_preds[0],
                                   (1, mIm_res, nIm_res, 3)))
                    if (predict_sphere_envMap[0]):
                        envMap_show = tonemap(envMap_preds[0], gamma)
                        tf.contrib.summary.image(
                            "envMap result",
                            tf.reshape(envMap_show, (1, mEnv, nEnv, 3)))
                        envMapTensorShow = tf.reshape(
                            tonemap(labels_envmap[0], gamma),
                            (1, mEnv, nEnv, 3))
                        tf.contrib.summary.image("envMap ground truth",
                                                 envMapTensorShow)
                    if (perform_testing):
                        tf.contrib.summary.scalar("Testing loss", testing_loss)
                        tf.contrib.summary.image(
                            "Testing appearance prediction",
                            tf.reshape(appearances_preds_test[0],
                                       (1, mIm_res, nIm_res, 3)))
                        tf.contrib.summary.image(
                            "Testing appearance label",
                            tf.reshape(labels_appearance_test[0],
                                       (1, mIm_res, nIm_res, 3)))
                        tf.contrib.summary.image(
                            "Testing normal map (from 3DMM) ",
                            tf.reshape(labels_normals_test[0],
                                       (1, mIm, nIm, 3)))
                        albedo_show = tf.pow(albedos_preds_test[0], invGamma)
                        tf.contrib.summary.image(
                            "Testing albedo prediction",
                            tf.reshape(albedo_show, (1, mIm_res, nIm_res, 3)))
                        albedoMapTensor = tf.reshape(gt_albedo_test[0],
                                                     (1, mIm_res, nIm_res, 3))
                        tf.contrib.summary.image("Testing albedo ground truth",
                                                 albedoMapTensor)
                        tf.contrib.summary.image(
                            "Testing normal map prediction",
                            tf.reshape(normals_preds_test[0],
                                       (1, mIm_res, nIm_res, 3)))
                # Periodic console logging (losses scaled for readability).
                if training_step % log_every_n_steps == 0:
                    # print("Iteration {}, batch: {} generator loss: {:.3f} ({:.3f}), discriminator loss: {:.3f}".
                    #       format(num_itr, batch, gen_loss.numpy(), gen_loss_adv.numpy(), dis_loss.numpy()))
                    print(
                        "Iteration {}, batch: {} Total generator loss: {:.3f} (appearance : {:.3f} - normal : {:.3f} - env: {:.3f}) "
                        .format(num_itr, batch,
                                log_losses_ratio * gen_loss.numpy(),
                                log_losses_ratio * loss_app.numpy(),
                                log_losses_ratio * loss_norm.numpy(),
                                log_losses_ratio * loss_env.numpy()))
                    if (perform_testing):
                        print("Testing loss : {:.3f} ".format(testing_loss))
                # Periodic result images written to disk.
                if training_step % write_every_n_steps == 0:
                    save_tensor_cv2(
                        appearances_preds[0],
                        result_training_folder_name + "Appearance.png")
                    save_tensor_cv2(envMap_preds[0],
                                    result_training_folder_name + "EnvMap.hdr",
                                    1)
                    save_tensor_cv2(normals_preds[0],
                                    result_training_folder_name + "Normal.png")
                    save_tensor_cv2(tf.pow(albedos_preds[0], invGamma),
                                    result_training_folder_name + "Albedo.png")
        num_itr = num_itr + 1
        root.save(checkpoint_write_prefix)  # checkpoint once per epoch
    return
# Module-level setup for render_face: chooses the environment map,
# input/output folders and sample code, loads the albedo/normal maps and
# the HDR env map, and resizes the albedo to the working resolution.
# Depends on module-level predict_sphere_envMap, load_rgba/load_rgb, cv2,
# np and plt — defined elsewhere in the file.
envDir = "EnvMaps/"
envName = "village"  # field field2 field3 field4 field5 park Uffizi village harbour_bright village2
# predict_sphere_envMap[1] presumably selects the sphere light-probe HDR
# layout over the lat-long one — TODO confirm.
if (predict_sphere_envMap[1]):
    envFile = envDir + envName + "_probe.hdr"
else:
    envFile = envDir + envName + ".hdr"
# NOTE(review): absolute user-specific path — breaks on other machines.
input_data_folder = "/Users/benjaminbarral/Documents/Academic/UCL/Research Project/3DMMExpe/TexturesChicagoCorrected/"  #_Appearance_UV.png"
output_data_folder = "Synthesized_Data/TobiasMail/"
code = "CFD-AF-207-023-N"
code_write = ""
albedo_file = input_data_folder + code + "_Appearance_UV.png"
normal_file = input_data_folder + code + "_Normal_UV.png"
albedoMap = load_rgba(
    albedo_file)  # "Data02_isomaps/AppearanceMap_test.png")
normalMap = load_rgb(normal_file)  # "Data02_isomaps/NormalMap_test.png")
data_input_folder_name = "Data_to_synthesize"
input_shape = (512, 512, 3)
num_replicate = 1
envMap = load_rgb(envFile, -1)  # -1 flag: load unclipped HDR data
print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
    np.amax(envMap), np.mean(envMap)))
(mEnv, nEnv, dEnv) = envMap.shape
print((mEnv, nEnv))
plt.imshow(envMap)
plt.show()
# NOTE(review): cv2.resize expects (width, height); passing
# (input_shape[0], input_shape[1]) is only correct here because the
# target is square (512x512).
albedoMap = cv2.resize(albedoMap, (input_shape[0], input_shape[1]))
def main():
    """Adversarial training: generator vs. discriminator on face textures.

    Loads synthesized inputs and real high-res ground-truth textures,
    builds a ``FirstGenerator`` (with env-map renderer inputs) and a
    ``FirstDiscriminator``, then runs an eager TF 1.x GAN loop: L2
    generator loss plus ``lambda_adv``-weighted adversarial loss, with
    periodic TensorBoard summaries, console logging and per-epoch
    checkpoints. Depends on module-level globals (``high_res_mode``,
    ``predict_sphere_envMap``, ``learnable_envMap_size``, ``lambda_adv``,
    ``learning_rate_disc``, ``load_pre_trained_model``, the
    ``*_every_n_steps`` constants) — none defined in this block.

    NOTE(review): indentation was lost in this file; the scope of the
    ``load_pre_trained_model`` preview block (restore + preview + early
    return) was reconstructed as being inside the ``if`` — confirm
    against version history.
    """
    input_shape = (256, 256, 3)
    shape_gt_adv = (512, 512, 3)
    mIm = input_shape[0]
    nIm = input_shape[1]
    # Output resolution: high-res mode upsamples predictions to 512x512.
    if (high_res_mode):
        mIm_res = shape_gt_adv[0]
        nIm_res = shape_gt_adv[1]
    else:
        mIm_res = mIm
        nIm_res = nIm
    # Load data
    data_input_folder_name = "Data04_isomaps_normals/"
    data_ground_truth_folder_name = "TexturesChicago/"
    ratio_train = 0.75
    max_files = 5000
    (dataset_adv_real,
     num_adv_real) = load_ground_truth_data(data_ground_truth_folder_name,
                                            resize_images=True,
                                            image_shape=shape_gt_adv)
    # Inputs replicated to match the number of real adversarial samples.
    (dataset_train_input,
     num_train_input) = load_input_data_with_normals_and_replicate(
         data_input_folder_name, input_shape, True, num_adv_real)
    plot_data_bis(dataset_train_input, 0)
    plot_data_bis(dataset_train_input, 3)
    plot_data_bis(dataset_train_input, 2)
    epochs = 50
    batch_size = 5
    num_batches = int(num_train_input / batch_size)
    envDir = "EnvMaps/"
    envName = envDir + "village"
    envFile = envName + "_probe.hdr"
    # NOTE(review): loads `envName` (no extension), not `envFile` — looks
    # like a bug; confirm whether load_rgb tolerates the missing suffix.
    envMap = load_rgb(envName, -1)
    (mEnv, nEnv, dEnv) = envMap.shape
    envMapTensor = tf.constant(envMap)
    # Final env-map resolution: either the learnable size, or a fixed
    # fraction (0.04) of the loaded map's resolution.
    if predict_sphere_envMap[0]:
        mEnv = learnable_envMap_size[0]
        nEnv = learnable_envMap_size[1]
    else:
        envmap_resize_rate = 0.04
        mEnv = int(envmap_resize_rate * mEnv)
        nEnv = int(envmap_resize_rate * nEnv)
    envMapTensor = tf.image.resize(envMapTensor, tf.constant([mEnv, nEnv]))
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    print("(mEnv,nEnv) = {},{}".format(mEnv, nEnv))
    ## Calculate envMap orientations (one unit direction per texel).
    envVectors = envMapOrientation.envMapOrientation(mEnv, nEnv,
                                                     predict_sphere_envMap[1])
    envOrientationTensor = tf.constant(envVectors, dtype=tf.float32)
    envOrientationTensor = tf.reshape(envOrientationTensor, [3, mEnv * nEnv])
    # Integration normalization; pi/4 presumably for the sphere-probe
    # layout — TODO confirm against the renderer.
    if (predict_sphere_envMap[1]):
        envNormalization = tf.constant(math.pi * mEnv * nEnv / 4.)
    else:
        envNormalization = tf.constant(float(mEnv * nEnv))
    # Mask of valid texels for the sphere-probe layout (corners unused).
    envMapMask = envMapOrientation.getMaskSphereMap(mEnv, nEnv)
    #plt.imshow(envMapMask)
    #plt.show()
    envMapMaskTensor = tf.constant(envMapMask)
    checkpoint_write_dir = "model_saved"  #_10_samples_withEnvMap_OkResults/"
    checkpoint_load_dir = "model_10_samples_withEnvMap_OkResults/"
    checkpoint_write_prefix = checkpoint_write_dir + "adversarial"
    #autoencoder_model = SecondUVAutoencoder(input_shape, envOrientationTensor,envMapTensor, envNormalization, predict_envMap)
    generator_model = FirstGenerator(input_shape, envOrientationTensor,
                                     envMapTensor, envNormalization,
                                     predict_sphere_envMap, high_res_mode)
    discriminator_model = FirstDiscriminator()
    generator_optimizer = tf.train.AdamOptimizer()
    discriminator_optimizer = tf.train.AdamOptimizer(
        learning_rate=learning_rate_disc)
    logdir = "./tb/"
    global_step = tf.train.get_or_create_global_step()
    summary_writer = tf.contrib.summary.create_file_writer(logdir,
                                                           flush_millis=10000)
    # Checkpoint covers both networks and both optimizers.
    root = tf.train.Checkpoint(
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer,
        generator_model=generator_model,
        discriminator_model=discriminator_model,
        optimizer_step=tf.train.get_or_create_global_step())
    if (load_pre_trained_model):
        root.restore(tf.train.latest_checkpoint(checkpoint_load_dir))
        # Preview a few predictions from the restored model, then stop.
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train = dataset_train_input.batch(1)
        for (batch, (inputs, labels_appearance, labels_normals,
                     masks)) in enumerate(x_train.take(4)):
            (albedos_preds, normals_preds, appearances_preds,
             envMap_preds) = generator_model(inputs)
            plt.imshow(appearances_preds[0])
            plt.show()
        return
    num_itr = 0
    while (num_itr < epochs):
        # Reshuffle both streams each epoch; real textures are zipped with
        # the synthesized inputs batch-for-batch.
        dataset_train_input = dataset_train_input.shuffle(num_train_input)
        x_train_input = dataset_train_input.batch(batch_size)
        dataset_adv_real = dataset_adv_real.shuffle(num_adv_real)
        x_train_adv_real = dataset_adv_real.batch(batch_size)
        for (batch, ((inputs, labels_appearance, labels_normals, masks),
                     ground_truth_images)) \
                in enumerate(zip(x_train_input, x_train_adv_real)):
            #plt.imshow(ground_truth_images[0])
            #plt.show()
            with summary_writer.as_default(
            ), tf.contrib.summary.always_record_summaries():
                # Two tapes: one per network, standard GAN update.
                with tf.GradientTape() as gen_tape, tf.GradientTape(
                ) as disc_tape:
                    (albedos_preds, normals_preds, appearances_preds,
                     envMap_preds) = generator_model(inputs)
                    fake_adv_output = discriminator_model(appearances_preds)
                    real_adv_output = discriminator_model(ground_truth_images)
                    gen_loss_l2 = generator_model.loss(appearances_preds,
                                                       normals_preds,
                                                       albedos_preds,
                                                       labels_appearance,
                                                       labels_normals, masks)
                    gen_loss_adv = discriminator_model.generator_loss(
                        fake_adv_output)
                    gen_loss = gen_loss_l2 + lambda_adv * gen_loss_adv
                    dis_loss = discriminator_model.discriminator_loss(
                        real_adv_output, fake_adv_output)
                gradients_of_generator = gen_tape.gradient(
                    gen_loss, generator_model.variables)
                gradients_of_discriminator = disc_tape.gradient(
                    dis_loss, discriminator_model.variables)
                generator_optimizer.apply_gradients(
                    zip(gradients_of_generator, generator_model.variables))
                # Only the discriminator update advances the global step.
                discriminator_optimizer.apply_gradients(
                    zip(gradients_of_discriminator,
                        discriminator_model.variables),
                    global_step=tf.train.get_or_create_global_step())
                # Periodic TensorBoard summaries.
                if (batch % show_every_n_steps == 0):
                    tf.contrib.summary.scalar("generator total loss", gen_loss)
                    tf.contrib.summary.scalar("adversarial loss (generator)",
                                              gen_loss_adv)
                    tf.contrib.summary.scalar(
                        "adversarial loss (discriminator)", dis_loss)
                    tf.contrib.summary.image(
                        "appearance input",
                        tf.reshape(inputs[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "normal map (from 3DMM) ",
                        tf.reshape(labels_normals[0], (1, mIm, nIm, 3)))
                    tf.contrib.summary.image(
                        "albedo prediction",
                        tf.reshape(albedos_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "normal map prediction",
                        tf.reshape(normals_preds[0], (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "appearance result",
                        tf.reshape(appearances_preds[0],
                                   (1, mIm_res, nIm_res, 3)))
                    tf.contrib.summary.image(
                        "ground truth high res texture",
                        tf.reshape(ground_truth_images[0],
                                   (1, mIm_res, nIm_res, 3)))
                    if (predict_sphere_envMap[0]):
                        # Mask out unused texels for the sphere-probe layout.
                        if (predict_sphere_envMap[1]):
                            envMap_show = tf.multiply(envMapMask,
                                                      envMap_preds[0])
                        else:
                            envMap_show = envMap_preds[0]
                        tf.contrib.summary.image(
                            "envMap result",
                            tf.reshape(envMap_show, (1, mEnv, nEnv, 3)))
                # Periodic console logging.
                if batch % log_every_n_steps == 0:
                    print(
                        "Iteration {}, batch: {} generator loss: {:.3f} ({:.3f}), discriminator loss: {:.3f}"
                        .format(num_itr, batch, gen_loss.numpy(),
                                gen_loss_adv.numpy(), dis_loss.numpy()))
        num_itr = num_itr + 1
        root.save(checkpoint_write_prefix)  # checkpoint once per epoch
    return
def render_face(envName, input_code, output_code, log_and_show,
                write_results, albedo_mode):
    """Render a face's lit appearance UV map under a given environment map.

    Loads the albedo (RGBA) and normal maps identified by ``input_code``,
    lights them with the HDR environment map ``envName`` via a
    ``FirstGenerator`` renderer, and optionally writes the appearance,
    albedo and normal maps (plus a copy of the env map) under
    ``output_code``.

    Args:
        envName: Environment-map base name (without directory/extension).
        input_code: File-name prefix for the input albedo/normal maps.
        output_code: File-name prefix for the written results.
        log_and_show: If truthy, print stats and display intermediates.
        write_results: If truthy, write output files to disk.
        albedo_mode: 0 renders the raw albedo; otherwise renders the
            linearized, ``albedo_boosting_factor``-scaled albedo.

    Relies on many module-level globals (``envDir``, ``input_data_folder``,
    ``output_data_folder``, ``mIm``/``nIm``, ``mEnv``/``nEnv``,
    ``normalizingValue``, ``gamma``/``invGamma``,
    ``albedo_boosting_factor``, ``input_shape``, ``envOrientationTensor``,
    ``envNormalization``, ``predict_sphere_envMap``, ``high_res_mode``) —
    none defined in this block.
    """
    # predict_sphere_envMap[1] presumably selects the sphere light-probe
    # HDR layout — TODO confirm.
    if (predict_sphere_envMap[1]):
        envFile = envDir + envName + "_probe.hdr"
    else:
        envFile = envDir + envName + ".hdr"
    albedo_file = input_data_folder + input_code + "_Appearance_UV.png"
    normal_file = input_data_folder + input_code + "_Normal_UV.png"
    # LOAD
    albedoMap = load_rgba(
        albedo_file)  # "Data02_isomaps/AppearanceMap_test.png")
    normalMap = load_rgb(normal_file)  # "Data02_isomaps/NormalMap_test.png")
    envMap = load_rgb(envFile, -1)  # -1 flag: load unclipped HDR data
    if (write_results):
        shutil.copy(envFile, output_data_folder + output_code + "_Illum.hdr")
    (mEnv_input, nEnv_input, dEnv) = envMap.shape
    if (log_and_show):
        print("Before resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))
        print("Input envmap size : ({:},{:})".format(mEnv_input, nEnv_input))
        plt.imshow(envMap)
        plt.show()
    #RESIZE — cv2.resize takes (width, height).
    albedoMap = cv2.resize(albedoMap, (nIm, mIm))
    normalMap = cv2.resize(normalMap, (nIm, mIm))
    envMap = cv2.resize(envMap, (nEnv, mEnv), interpolation=cv2.INTER_LINEAR)
    if (log_and_show):
        print("After resize : max: {:.3f}, mean: ({:.3f})".format(
            np.amax(envMap), np.mean(envMap)))
    d = 3  # RGB channels used by the renderer (alpha handled separately)
    albedoTensor = tf.constant(albedoMap[:, :, :3], dtype=tf.float32)
    normalTensor = tf.constant(normalMap[:, :, :3], dtype=tf.float32)
    envMapTensor = tf.constant(envMap, dtype=tf.float32)
    # Scale 8-bit maps to [0, 1].
    albedoTensor = tf.scalar_mul(1. / normalizingValue,
                                 albedoTensor[:, :, :3])
    normalTensor = tf.scalar_mul(1. / normalizingValue,
                                 normalTensor[:, :, :3])
    # Boost albedo: linearize (gamma) then scale by the boosting factor.
    albedo_boosted_Tensor = tf.scalar_mul(tf.constant(albedo_boosting_factor),
                                          tf.pow(albedoTensor, gamma))
    if (log_and_show):
        plt.imshow(envMapTensor)
        plt.show()
        plt.imshow(albedoTensor)
        plt.show()
        plt.imshow(tf.pow(albedo_boosted_Tensor, invGamma))
        plt.show()
        # plt.imshow(normalTensor)
        # plt.show()
    envMapTensor = tf.reshape(envMapTensor, [mEnv * nEnv, 3])
    # Add a leading batch dimension of 1 for the renderer.
    normalTensor = tf.reshape(normalTensor, [1, mIm, nIm, d])
    albedoTensor = tf.reshape(albedoTensor, [1, mIm, nIm, d])
    autoencoder_model = FirstGenerator(input_shape, envOrientationTensor,
                                       envMapTensor, envNormalization,
                                       predict_sphere_envMap, high_res_mode)
    # resTensor = autoencoder_model.render(normalTensor,albedoTensor)
    # Choose which albedo to render/save according to albedo_mode.
    if albedo_mode == 0:
        albedo_render = albedoTensor
        albedo_save = albedoMap
    else:
        albedo_render = albedo_boosted_Tensor
        albedo_save = 255. * np.array(tf.pow(albedo_boosted_Tensor, invGamma))
    resTensor = autoencoder_model.render_with_predicted_envMap(
        normalTensor, albedo_render,
        tf.reshape(envMapTensor, [1, mEnv * nEnv, 3]))
    # Gamma-encode the linear render for display/saving.
    resTensorGamma = tf.pow(resTensor, invGamma)
    res_save = 255. * np.array(resTensorGamma[0])
    res_save = convert_rgb_to_cv2(res_save)  # RGB -> BGR for cv2.imwrite
    # Re-attach the source albedo's alpha channel as a 4th channel.
    res_save_temp = np.zeros([res_save.shape[0], res_save.shape[1], 4])
    res_save_temp[:, :, :3] = res_save[:, :, :]
    res_save_temp[:, :, 3] = albedoMap[:, :, 3]
    res_save = res_save_temp.astype(int)
    albedo_save = convert_rgb_to_cv2(albedo_save)
    albedo_save = albedo_save.astype(int)
    if (log_and_show):
        # plt.imshow(resTensor[0])
        # plt.show()
        plt.imshow(resTensorGamma[0])
        plt.show()
    if (write_results):
        cv2.imwrite(output_data_folder + output_code + "_Appearance_UV.png",
                    res_save)
        cv2.imwrite(output_data_folder + output_code + "_Albedo_UV.png",
                    albedo_save)
        cv2.imwrite(output_data_folder + output_code + "_Normal_UV.png",
                    normalMap)
    return
def load_and_resize_envMap(path):
    """Read an HDR environment map from ``path``, resize it to the
    module-level ``envMap_shape`` (height, width) and return it as a
    TensorFlow constant.
    """
    raw_map = load_rgb(path, -1)  # -1 flag: load unclipped HDR data
    target_size = (envMap_shape[1], envMap_shape[0])  # cv2 wants (w, h)
    resized_map = cv2.resize(raw_map, target_size,
                             interpolation=cv2.INTER_LINEAR)
    return tf.constant(resized_map)