def main(unused_argv):
  """Renders a sun-rotation timelapse from a panorama stack.

  Loads an aligned panorama stack, decomposes it into log-reflectance and
  log-shading factors, generates a sequence of shading images with the sun
  azimuth rotated, recomposites frames, and writes them to FLAGS.output_dir.

  Args:
    unused_argv: Leftover command-line args from absl.app (ignored).

  Raises:
    ValueError: If --stack_folder was not provided.
  """
  if not FLAGS.stack_folder:
    raise ValueError("stack_folder was not defined")

  # Load the example stack and its per-image alignment parameters.
  (pano_stack, alignment_params) = stack_io.read_stack(FLAGS.stack_folder)
  # Only the lighting-context samples are needed here; azimuths are generated
  # by generate_sun_rotation below.
  unused_azimuth, lighting_context = stack_io.load_sample_illuminations()

  lighting_context_factors = tf.constant(lighting_context, dtype=tf.float32)
  # [0, 1]-ranged panoramas of shape [384, 960].
  pano_stack = tf.constant(pano_stack, dtype=tf.float32)
  alignment_params = tf.constant(alignment_params, dtype=tf.float32)

  # Align images using parameters.
  alignment_module = image_alignment.ImageAlignment(regularization=0.3)
  aligned_stack = alignment_module.align_images(pano_stack, alignment_params)

  factorize_model = network.FactorizeEncoderDecoder(
      {
          "lighting_dim": 32,
          "permanent_dim": 16
      }, is_training=False)
  stack_factors = factorize_model.compute_decomposition(
      aligned_stack, single_image_decomposition=False, average_stack=True)

  # Rotate the sun for a single permanent factor and the requested lighting
  # context. The [i:i+1] slice keeps the leading batch dimension.
  rotate_shading_image = factorize_model.generate_sun_rotation(
      stack_factors["permanent_factor"][:1],
      lighting_context_factors[FLAGS.lighting_context_index:FLAGS.
                               lighting_context_index + 1],
      FLAGS.azimuth_frame_rate)
  results = network.recomposite_from_log_components(
      stack_factors["log_reflectance"], rotate_shading_image)
  # NOTE(review): the original also built an unused reconstruction of the
  # input stack and an unused tf.train.Saver; both were dead code and have
  # been removed.

  # Restore factorization network weights from ckpt.
  tf.train.init_from_checkpoint(
      "./factorize_a_city/ckpt/factorize_model.ckpt",
      {"decomp_internal/": "decomp_internal/"})
  # Use a context manager so the session is always closed.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(results)
  # write_stack_images expects [0, 1]-ranged frames; `out` is presumably in
  # [0, 255] — TODO confirm against network.recomposite_from_log_components.
  stack_io.write_stack_images(FLAGS.output_dir, out / 255.)
def main(unused_argv):
  """Relights a panorama stack under sampled illumination conditions.

  Decomposes an aligned stack into reflectance and shading factors, then
  recomposites the scene under each sample illumination (azimuth + lighting
  context) and writes the relit frames to FLAGS.output_dir.

  Args:
    unused_argv: Leftover command-line args from absl.app (ignored).

  Raises:
    ValueError: If --stack_folder was not provided.
  """
  if not FLAGS.stack_folder:
    raise ValueError("stack_folder was not defined")

  # Load example stacks. Each panorama has shape [384, 960, 3] and has values
  # between [0, 1]. Alignment parameters accompany the stack.
  (permanent_stack, alignment_params) = stack_io.read_stack(FLAGS.stack_folder)
  # Load example azimuth and illumination samples.
  azimuth, lighting_context = stack_io.load_sample_illuminations()

  # Wrap everything the graph consumes as float32 constants.
  permanent_stack = tf.constant(permanent_stack, dtype=tf.float32)
  azimuth_factors = tf.constant(azimuth, dtype=tf.float32)
  lighting_context_factors = tf.constant(lighting_context, dtype=tf.float32)

  # Align images using learnt parameters.
  aligner = image_alignment.ImageAlignment(regularization=0.3)
  aligned_stack = aligner.align_images(permanent_stack, alignment_params)

  # Build the factorization network in inference mode and decompose the stack.
  factorize_model = network.FactorizeEncoderDecoder(
      {
          "lighting_dim": 32,
          "permanent_dim": 16
      }, is_training=False)
  stack_factors = factorize_model.compute_decomposition(aligned_stack)

  # Broadcast the first frame's permanent factor across all illumination
  # samples so each sample relights the same scene.
  num_samples = azimuth.shape[0]
  tiled_permanent = tf.tile(stack_factors["permanent_factor"][:1],
                            [num_samples, 1, 1, 1])
  predicted_shading = factorize_model.generate_shading_image(
      tiled_permanent, lighting_context_factors, azimuth_factors)
  relit_results = network.recomposite_from_log_components(
      stack_factors["log_reflectance"], predicted_shading)

  # Restore factorization network weights from ckpt.
  tf.train.init_from_checkpoint(
      "./factorize_a_city/ckpt/factorize_model.ckpt",
      {"decomp_internal/": "decomp_internal/"})
  sess = tf.Session()
  sess.run(tf.global_variables_initializer())
  out = sess.run(relit_results)
  stack_io.write_stack_images(FLAGS.output_dir, out / 255.)