# Example #1
# 0
    def sample_images_tensorboard(self, real_images):
        """Build a single tensorboard-ready grid of real and generated samples.

        Takes the first `self.n_samples` real images and generates matching
        fakes at four truncation strengths, then tiles everything into one
        image with a leading batch dimension of 1.
        """
        n = self.n_samples
        real_batch = real_images[:n, :, :, :]
        z = tf.random.normal(shape=(n, self.g_params['z_dim']), dtype=tf.dtypes.float32)
        labels = tf.ones((n, self.g_params['labels_dim']), dtype=tf.dtypes.float32)

        # generate one row of fakes per truncation strength
        fakes = []
        for psi in (0.0, 0.5, 0.7, 1.0):
            imgs, _ = self.g_clone([z, labels], truncation_psi=psi, training=False)
            fakes.append(imgs)

        # stack reals + fakes along the batch axis: [5 * n, 3, out_res, out_res]
        grid = tf.concat([real_batch] + fakes, axis=0)

        # convert to a saveable layout: [5 * n, out_res, out_res, 3]
        grid = postprocess_images(grid)

        # cap the side length at 256 to keep summary files small
        side = min(self.out_res, 256)
        grid = tf.image.resize(grid, size=[side, side])

        # tile into one image and add a batch dim: [1, 5 * side, n * side, 3]
        grid = merge_batch_images(grid, side, rows=5, cols=n)
        return np.expand_dims(grid, axis=0)
def convert_official_weights():
    """Port the official pretrained generator weights into this implementation.

    Builds a fresh Generator, copies values from the official checkpoint via a
    name/shape mapping, renders one fixed-seed image as a smoke test, and
    saves the converted weights as a tf.train.Checkpoint.
    """
    # generator hyper-parameters for the official 1024x1024 configuration
    g_params = {
        'z_dim': 512,
        'w_dim': 512,
        'labels_dim': 0,
        'n_mapping': 8,
        'resolutions': [4, 8, 16, 32, 64, 128, 256, 512, 1024],
        'featuremaps': [512, 512, 512, 512, 512, 256, 128, 64, 32],
        'w_ema_decay': 0.995,
        'style_mixing_prob': 0.9,
    }
    g_clone = Generator(g_params)

    # run one forward pass per mode so every variable is created before restore
    test_latent = np.ones((1, g_params['z_dim']), dtype=np.float32)
    test_labels = np.ones((1, g_params['labels_dim']), dtype=np.float32)
    for is_training in (False, True):
        _ = g_clone([test_latent, test_labels], training=is_training)

    # locate the official checkpoint and enumerate its variables
    official_checkpoint = tf.train.latest_checkpoint('./official-pretrained')
    official_vars = tf.train.list_variables(official_checkpoint)

    # map official variable names onto ours and verify the shapes agree
    name_mapper = variable_name_mapper(g_clone)
    check_shape(name_mapper, official_vars)

    # copy the official values into the freshly built generator
    tf.compat.v1.train.init_from_checkpoint(official_checkpoint,
                                            assignment_map=name_mapper)

    # smoke test: render a single image from a fixed seed
    seed = 6600
    rnd = np.random.RandomState(seed)
    latents = rnd.randn(1, g_params['z_dim']).astype(np.float32)
    image_out, _ = g_clone([latents, test_labels],
                           training=False,
                           truncation_psi=0.5)
    image_out = postprocess_images(image_out).numpy()
    Image.fromarray(image_out[0], 'RGB').save('seed{}.png'.format(seed))

    # persist the converted weights (keep only the newest checkpoint)
    ckpt_dir = './official-converted'
    ckpt = tf.train.Checkpoint(g_clone=g_clone)
    manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
    manager.save(checkpoint_number=0)
    return
# Example #3
# 0
def test_generator(ckpt_dir, use_custom_cuda, out_fn):
    """Restore a generator from `ckpt_dir` and save one fixed-seed sample image.

    The output filename is prefixed with the seed, e.g. 'seed6600-<out_fn>'.
    """
    g_clone = load_generator(g_params=None, is_g_clone=True, ckpt_dir=ckpt_dir, custom_cuda=use_custom_cuda)

    # deterministic inputs derived from a fixed seed
    seed = 6600
    rnd = np.random.RandomState(seed)
    latents = rnd.randn(1, g_clone.z_dim).astype(np.float32)
    labels = rnd.randn(1, g_clone.labels_dim).astype(np.float32)

    # NOTE(review): unlike the sibling functions, the call result is used
    # directly rather than unpacked as `image_out, _ = ...` — presumably
    # load_generator() returns a model whose call yields only the image
    # tensor; confirm against load_generator's implementation.
    image_out = g_clone([latents, labels], training=False, truncation_psi=0.5)
    image_out = postprocess_images(image_out).numpy()

    out_fn = f'seed{seed}-{out_fn}'
    Image.fromarray(image_out[0], 'RGB').save(out_fn)
    return
def test_generator():
    """Rebuild the generator, restore the converted official checkpoint, and
    save one fixed-seed sample image ('seed6600-restored.png') for comparison.
    """
    # generator hyper-parameters for the official 1024x1024 configuration
    g_params = {
        'z_dim': 512,
        'w_dim': 512,
        'labels_dim': 0,
        'n_mapping': 8,
        'resolutions': [4, 8, 16, 32, 64, 128, 256, 512, 1024],
        'featuremaps': [512, 512, 512, 512, 512, 256, 128, 64, 32],
        'w_ema_decay': 0.995,
        'style_mixing_prob': 0.9,
    }
    g_clone = Generator(g_params)

    # one forward pass per mode so all variables exist before restoring
    test_latent = np.ones((1, g_params['z_dim']), dtype=np.float32)
    test_labels = np.ones((1, g_params['labels_dim']), dtype=np.float32)
    for is_training in (False, True):
        _ = g_clone([test_latent, test_labels], training=is_training)

    # restore the previously converted checkpoint, if one exists
    ckpt_dir = './official-converted'
    ckpt = tf.train.Checkpoint(g_clone=g_clone)
    manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1)
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print('Restored from {}'.format(manager.latest_checkpoint))

    # render one image from a fixed seed and write it to disk
    seed = 6600
    rnd = np.random.RandomState(seed)
    latents = rnd.randn(1, g_params['z_dim']).astype(np.float32)
    image_out, _ = g_clone([latents, test_labels],
                           training=False,
                           truncation_psi=0.5)
    image_out = postprocess_images(image_out).numpy()
    Image.fromarray(image_out[0],
                    'RGB').save('seed{}-restored.png'.format(seed))
    return