def initiate_models(g_params, d_params, use_custom_cuda):
    """Construct a fresh discriminator, generator, and EMA generator clone.

    No checkpoint is restored here (ckpt_dir=None everywhere); the clone is
    seeded with the generator's current weights so the exponential moving
    average starts from the same point as training.
    """
    discriminator = load_discriminator(d_params, ckpt_dir=None, custom_cuda=use_custom_cuda)
    generator = load_generator(g_params=g_params, is_g_clone=False, ckpt_dir=None,
                               custom_cuda=use_custom_cuda)
    g_clone = load_generator(g_params=g_params, is_g_clone=True, ckpt_dir=None,
                             custom_cuda=use_custom_cuda)

    # Sync the EMA clone to the generator before any training step runs.
    g_clone.set_weights(generator.get_weights())
    return discriminator, generator, g_clone
def convert_official_generator_weights(ckpt_dir, is_g_clone, use_custom_cuda):
    """Port official pretrained generator weights into this implementation.

    Builds an un-restored generator, maps each official checkpoint variable
    onto the corresponding local variable, verifies shapes, assigns the
    values, and saves the result under ``ckpt_dir`` in a slot named after
    the generator role ('g_clone' or 'generator').
    """
    generator = load_generator(g_params=None, is_g_clone=is_g_clone, ckpt_dir=None,
                               custom_cuda=use_custom_cuda)

    # Locate the official pretrained checkpoint and enumerate its variables.
    official_checkpoint = tf.train.latest_checkpoint('./official-pretrained')
    official_vars = tf.train.list_variables(official_checkpoint)

    # Official-name -> local-variable mapping; echo it for manual inspection.
    name_mapper = variable_name_mapper_g(generator, is_g_clone=is_g_clone)
    for name_g, tvar in name_mapper.items():
        print(f'{name_g}: {tvar.name}')

    # Fail fast if any mapped pair disagrees on shape.
    check_shape(name_mapper, official_vars)

    # Copy the official values into the local variables.
    tf.compat.v1.train.init_from_checkpoint(official_checkpoint, assignment_map=name_mapper)

    # Save under the checkpoint attribute name matching the generator role.
    slot = 'g_clone' if is_g_clone else 'generator'
    ckpt = tf.train.Checkpoint(**{slot: generator})
    manager = tf.train.CheckpointManager(ckpt, os.path.join(ckpt_dir, slot), max_to_keep=1)
    manager.save(checkpoint_number=0)
    return
def convert_official_weights_together(ckpt_dir, use_custom_cuda):
    """Port official discriminator, generator, and EMA-clone weights at once.

    All three models are built fresh, their variables are mapped against the
    official pretrained checkpoint, shapes are verified, values are restored,
    and a single combined checkpoint is written to ``ckpt_dir``.
    """
    # Build every model without restoring anything yet.
    discriminator = load_discriminator(d_params=None, ckpt_dir=None, custom_cuda=use_custom_cuda)
    generator = load_generator(g_params=None, is_g_clone=False, ckpt_dir=None,
                               custom_cuda=use_custom_cuda)
    g_clone = load_generator(g_params=None, is_g_clone=True, ckpt_dir=None,
                             custom_cuda=use_custom_cuda)

    # List the official checkpoint's variables and echo them for inspection.
    official_checkpoint = tf.train.latest_checkpoint('./official-pretrained')
    official_vars = tf.train.list_variables(official_checkpoint)
    for name, shape in official_vars:
        print(f'{name}: {shape}')

    # Merge the per-model name maps into one assignment map.
    name_mapper = {
        **variable_name_mapper_d(discriminator),
        **variable_name_mapper_g(generator, is_g_clone=False),
        **variable_name_mapper_g(g_clone, is_g_clone=True),
    }

    # Fail fast if any mapped pair disagrees on shape.
    check_shape(name_mapper, official_vars)

    # Copy official values into all three models.
    tf.compat.v1.train.init_from_checkpoint(official_checkpoint, assignment_map=name_mapper)

    # One combined checkpoint holding all three models.
    ckpt = tf.train.Checkpoint(discriminator=discriminator, generator=generator, g_clone=g_clone)
    manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
    manager.save(checkpoint_number=0)
    return
def test_generator(ckpt_dir, use_custom_cuda, out_fn):
    """Smoke-test a restored EMA generator: render one fixed-seed image to disk.

    The output filename is prefixed with the seed so repeated runs with
    different configurations do not collide.
    """
    g_clone = load_generator(g_params=None, is_g_clone=True, ckpt_dir=ckpt_dir,
                             custom_cuda=use_custom_cuda)

    # Deterministic inputs: fixed seed for both latents and labels.
    seed = 6600
    rnd = np.random.RandomState(seed)
    latents = rnd.randn(1, g_clone.z_dim).astype(np.float32)
    labels = rnd.randn(1, g_clone.labels_dim).astype(np.float32)

    # Forward pass with truncation, then convert to a displayable array.
    image_out = g_clone([latents, labels], training=False, truncation_psi=0.5)
    image_out = postprocess_images(image_out).numpy()

    Image.fromarray(image_out[0], 'RGB').save(f'seed{seed}-{out_fn}')
    return
def inference(ckpt_dir, use_custom_cuda, res, out_fn=None):
    """Restore an EMA generator at resolution ``res`` and show one random image.

    Raises ValueError (from list.index) if ``res`` is not a supported
    power-of-two resolution. The image is displayed, and additionally saved
    when ``out_fn`` is given.
    """
    # Progressive-growing schedule: per-resolution feature-map widths.
    resolutions = [4, 8, 16, 32, 64, 128, 256, 512, 1024]
    featuremaps = [512, 512, 512, 512, 512, 256, 128, 64, 32]
    cutoff = resolutions.index(res) + 1
    g_params = {
        'z_dim': 512,
        'w_dim': 512,
        'labels_dim': 0,
        'n_mapping': 8,
        'resolutions': resolutions[:cutoff],
        'featuremaps': featuremaps[:cutoff],
    }

    generator = load_generator(g_params, is_g_clone=True, ckpt_dir=ckpt_dir,
                               custom_cuda=use_custom_cuda)

    # Random latent + (empty, labels_dim == 0) label input, truncated sampling.
    z = tf.random.normal(shape=[1, g_params['z_dim']])
    y = tf.random.normal(shape=[1, g_params['labels_dim']])
    fake_images = generator([z, y], training=False, truncation_psi=0.5)

    # NCHW float in [-1, 1] -> HWC uint8 in [0, 255].
    fake_images = (tf.clip_by_value(fake_images, -1.0, 1.0) + 1.0) * 127.5
    fake_images = tf.transpose(fake_images, perm=[0, 2, 3, 1])
    fake_images = tf.cast(fake_images, tf.uint8)

    image = Image.fromarray(fake_images[0].numpy()).convert('RGB')
    image.show()
    if out_fn is not None:
        image.save(out_fn)
    return
message = f'{res}x{res} with custom cuda' if use_custom_cuda else f'{res}x{res} without custom cuda' print(message) resolutions = [4, 8, 16, 32, 64, 128, 256] feature_maps = [512, 512, 512, 512, 512, 256, 128] filter_index = resolutions.index(res) g_params = { 'z_dim': 512, 'w_dim': 512, 'labels_dim': 0, 'n_mapping': 8, 'resolutions': resolutions[:filter_index + 1], 'featuremaps': feature_maps[:filter_index + 1], } generator = load_generator(g_params, is_g_clone=True, ckpt_dir=ckpt_dir, custom_cuda=use_custom_cuda) model = classification_model(256, 256) model.load_weights('best_weights.hdf5') alpha = np.zeros((1, 14, 512)) alpha[:, 8:, :] = alpha[:, 8:, :] + 2 * (epsilon / iterations) image_count = 0 bar = tqdm(latent_dict.items()) for file_name, latent in bar: gen_latent = tf.identity(latent) gen_latent = gen_latent + tf.random.uniform( gen_latent.get_shape().as_list(), minval=-epsilon, maxval=epsilon,