Example #1
def generate_images(network_pkl, seeds, truncation_psi):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' %
              (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        tflib.set_vars(
            {var: rnd.randn(*var.shape.as_list())
             for var in noise_vars})  # [height, width]
        images = Gs.run(z, None,
                        **Gs_kwargs)  # [minibatch, height, width, channel]
        PIL.Image.fromarray(images[0], 'RGB').save(
            dnnlib.make_run_dir_path('seed%04d.png' % seed))

def project_generated_images(network_pkl, seeds, num_snapshots,
                             truncation_psi):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi

    for seed_idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        tflib.set_vars(
            {var: rnd.randn(*var.shape.as_list())
             for var in noise_vars})
        images = Gs.run(z, None, **Gs_kwargs)
        project_image(proj,
                      targets=images,
                      png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
                      num_snapshots=num_snapshots)

def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = num_snapshots
    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]
    # for i in range(10):
    #     images, _labels = dataset_obj.get_minibatch_np(1)
    #
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        project_image(proj,
                      targets=images,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' %
                                                          image_idx),
                      num_snapshots=num_snapshots)
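
Note: the seed-to-latent pattern used throughout this example is deterministic. A minimal standalone sketch (assuming the usual 512-dimensional Z space; the helper name is illustrative):

import numpy as np

def seed_to_z(seed, z_dim=512):
    # Same construction as above: a fixed seed always yields the same latent.
    rnd = np.random.RandomState(seed)
    return rnd.randn(1, z_dim)  # [minibatch, component]

z_a = seed_to_z(1000)
z_b = seed_to_z(1000)
assert np.array_equal(z_a, z_b)  # reproducible across runs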
Example #4
 def save_blends(imgs_a, imgs_b, path, offset=0):
     if grid:
         img_a = to_pil(create_img_grid(imgs_a, grid_size), drange)
         img_b = to_pil(create_img_grid(imgs_b, grid_size), drange)
         blend = PIL.Image.blend(img_a, img_b, alpha=alpha)
         blend.save(dnnlib.make_run_dir_path(path % offset))
     else:
         img_pairs = zip(imgs_a, imgs_b)
         img_pairs = enumerate(img_pairs)
         if verbose:
             img_pairs = tqdm(list(img_pairs))
         for i, (img_a, img_b) in img_pairs:
             img_a = to_pil(img_a, drange=drange)
             img_b = to_pil(img_b, drange=drange)
             blend = PIL.Image.blend(img_a, img_b, alpha=alpha)
             blend.save(dnnlib.make_run_dir_path(path % (offset + i)))
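
A self-contained sketch of the blending step above, using PIL.Image.blend on two same-size RGB images (solid colors stand in for the generated frames; the output filename is illustrative):

import PIL.Image

# Two images of identical size and mode; alpha=0.5 averages them pixel-wise.
img_a = PIL.Image.new('RGB', (256, 256), (255, 0, 0))
img_b = PIL.Image.new('RGB', (256, 256), (0, 0, 255))
blend = PIL.Image.blend(img_a, img_b, alpha=0.5)
blend.save('blend_example.png')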
Example #5
def style_mixing_example(network_pkl, row_seeds, col_seeds, truncation_psi, col_styles, minibatch_size=4):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg') # [component]

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size

    print('Generating W vectors...')
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds]) # [minibatch, component]
    all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]
    all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} # [layer, component]

    print('Generating images...')
    all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]
    image_dict = {(seed, seed): image for seed, image in zip(all_seeds, list(all_images))}

    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        for col_seed in col_seeds:
            w = w_dict[row_seed].copy()
            w[col_styles] = w_dict[col_seed][col_styles]
            image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]
            image_dict[(row_seed, col_seed)] = image

    print('Saving images...')
    for (row_seed, col_seed), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d.png' % (row_seed, col_seed)))

    print('Saving image grid...')
    _N, _C, H, W = Gs.output_shape
    canvas = PIL.Image.new('RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black')
    for row_idx, row_seed in enumerate([None] + row_seeds):
        for col_idx, col_seed in enumerate([None] + col_seeds):
            if row_seed is None and col_seed is None:
                continue
            key = (row_seed, col_seed)
            if row_seed is None:
                key = (col_seed, col_seed)
            if col_seed is None:
                key = (row_seed, row_seed)
            canvas.paste(PIL.Image.fromarray(image_dict[key], 'RGB'), (W * col_idx, H * row_idx))
    canvas.save(dnnlib.make_run_dir_path('grid.png'))
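
The core of the style-mixing function is two numpy operations: the truncation trick and a per-layer swap of W rows. A minimal sketch with random stand-ins for w_avg and the mapped W vectors (18 layers of 512 components assumed):

import numpy as np

num_layers, w_dim, psi = 18, 512, 0.7
w_avg = np.random.randn(w_dim)              # [component]
w_row = np.random.randn(num_layers, w_dim)  # [layer, component]
w_col = np.random.randn(num_layers, w_dim)

# Truncation trick: pull each W toward the average.
w_row = w_avg + (w_row - w_avg) * psi
w_col = w_avg + (w_col - w_avg) * psi

# Style mixing: copy the chosen layers from the column source.
col_styles = [0, 1, 2, 3, 4, 5, 6]
w_mix = w_row.copy()
w_mix[col_styles] = w_col[col_styles]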
Example #6
def generate_images(network_pkl, seeds, npy_files, truncation_psi):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    if seeds is not None:
        for seed_idx, seed in enumerate(seeds):
            print('Generating image for seed %d (%d/%d) ...' %
                  (seed, seed_idx + 1, len(seeds)))
            rnd = np.random.RandomState(seed)
            z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
            tflib.set_vars(
                {var: rnd.randn(*var.shape.as_list())
                 for var in noise_vars})  # [height, width]
            images = Gs.run(z, None,
                            **Gs_kwargs)  # [minibatch, height, width, channel]
            generated_img = PIL.Image.fromarray(images[0], 'RGB')
            generated_img.save(
                dnnlib.make_run_dir_path('seed%04d.png' % seed))
            return generated_img  # note: only the first seed is rendered before returning

    if npy_files is not None:
        npys = npy_files.split(',')
        dlatent_avg = Gs.get_var('dlatent_avg')  # [component]

        for npy in range(len(npys)):
            print('Generating image from npy (%d/%d) ...' %
                  (npy + 1, len(npys)))
            w = np.load(npys[npy])
            print(w.shape)
            dl = (w - dlatent_avg) * truncation_psi + dlatent_avg
            images = Gs.components.synthesis.run(
                dl, **Gs_kwargs)  # [minibatch, height, width, channel]
            name = os.path.basename(npys[npy])
            PIL.Image.fromarray(images[0], 'RGB').save(
                dnnlib.make_run_dir_path('%s.png' % name))

def generate_neighbors(network_pkl, seeds, diameter, truncation_psi,
                       num_samples, save_vector):
    global _G, _D, Gs, noise_vars
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' %
              (seed, seed_idx + 1, len(seeds)))
        rnd = np.random.RandomState(seed)

        og_z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        tflib.set_vars(
            {var: rnd.randn(*var.shape.as_list())
             for var in noise_vars})  # [height, width]
        images = Gs.run(og_z, None,
                        **Gs_kwargs)  # [minibatch, height, width, channel]
        PIL.Image.fromarray(images[0], 'RGB').save(
            dnnlib.make_run_dir_path('seed%04d.png' % seed))

        zs = []
        z_prefix = 'seed%04d_neighbor' % seed

        for s in range(num_samples):
            random = np.random.uniform(-diameter, diameter, [1, 512])
            #             zs.append(np.clip((og_z+random),-1,1))
            new_z = np.clip(np.add(og_z, random), -1, 1)
            images = Gs.run(new_z, None,
                            **Gs_kwargs)  # [minibatch, height, width, channel]
            PIL.Image.fromarray(images[0], 'RGB').save(
                dnnlib.make_run_dir_path('%s%04d.png' % (z_prefix, s)))
            # generate_latent_images(zs, truncation_psi, save_vector, z_prefix)
            if save_vector:
                np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (z_prefix, s)),
                        new_z)
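
The neighbor sampling above reduces to a uniform perturbation of the original z, clipped to [-1, 1]. Isolated sketch:

import numpy as np

diameter, num_samples = 0.5, 4
og_z = np.random.RandomState(0).randn(1, 512)

neighbors = []
for _ in range(num_samples):
    offset = np.random.uniform(-diameter, diameter, [1, 512])
    neighbors.append(np.clip(og_z + offset, -1, 1))  # keep each neighbor inside [-1, 1]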
Example #8
def generate_images(network_pkl, seeds, truncation_psi, latents):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi
    if latents is None:
        for seed_idx, seed in enumerate(seeds):
            print('Generating image for seed %d (%d/%d) ...' %
                  (seed, seed_idx, len(seeds)))
            rnd = np.random.RandomState(seed)
            z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
            with open("test.txt", "ab") as f:
                np.savetxt(f, z, delimiter=',', fmt='%1.5f')
                f.write(b"\n")
            tflib.set_vars(
                {var: rnd.randn(*var.shape.as_list())
                 for var in noise_vars})  # [height, width]
            images = Gs.run(z, None,
                            **Gs_kwargs)  # [minibatch, height, width, channel]
            PIL.Image.fromarray(images[0], 'RGB').save(
                dnnlib.make_run_dir_path('seed%04d.png' % seed))
    else:
        seed = np.random.choice(seeds)
        latents = np.genfromtxt(latents, delimiter=',')
        if latents.ndim == 1:
            latents = np.expand_dims(latents, axis=0)
        assert latents.shape[1] == 512
        for lat_idx, lat in enumerate(latents):
            z = np.expand_dims(lat, axis=0)
            rnd = np.random.RandomState(seed)
            tflib.set_vars(
                {var: rnd.randn(*var.shape.as_list())
                 for var in noise_vars})  # [height, width]
            images = Gs.run(z, None,
                            **Gs_kwargs)  # [minibatch, height, width, channel]
            PIL.Image.fromarray(images[0], 'RGB').save(
                dnnlib.make_run_dir_path('lat%04d.png' % lat_idx))
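
The text round-trip used above (np.savetxt with a comma delimiter, read back with np.genfromtxt) can be checked in isolation; the sketch below assumes 512-dimensional latents and an illustrative filename:

import numpy as np

z = np.random.RandomState(0).randn(1, 512)
with open('latents.txt', 'ab') as f:   # append mode, as in the function above
    np.savetxt(f, z, delimiter=',', fmt='%1.5f')

latents = np.genfromtxt('latents.txt', delimiter=',')
if latents.ndim == 1:                  # a single row comes back 1-D
    latents = np.expand_dims(latents, axis=0)
assert latents.shape[1] == 512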
Example #9
 def restore(self, npy_dir, img_dir):
     print(npy_dir)
     w = np.load(npy_dir)[np.newaxis, :]
     with self.graph.as_default():
         with self.session.as_default():
             images = self.Gs_network.components.synthesis.run(
                 w, **self.Gs_syn_kwargs)
         PIL.Image.fromarray(images[0],
                             'RGB').save(dnnlib.make_run_dir_path(img_dir))
Example #10
def generate_images(args):

    os.makedirs(args.dst_dir, exist_ok=True)
    os.makedirs('{}/images/'.format(args.dst_dir), exist_ok=True)
    os.makedirs('{}/generate_zcodes/'.format(args.dst_dir), exist_ok=True)
    os.makedirs('{}/generate_wcodes/'.format(args.dst_dir), exist_ok=True)
    print('Loading networks from "{}"...'.format(args.network_pkl))
    tflib.init_tf()
    with open(args.network_pkl, "rb") as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                          nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = 1

    for i in tqdm(range(int(args.num))):

        # Generate random latent
        w_avg = Gs_network.get_var('dlatent_avg')
        noise_vars = [
            var for name, var in Gs_network.components.synthesis.vars.items()
            if name.startswith('noise')
        ]
        z = np.random.randn(1, *Gs_network.input_shape[1:])
        tflib.set_vars(
            {var: np.random.randn(*var.shape.as_list())
             for var in noise_vars})
        # Generate dlatent
        w = Gs_network.components.mapping.run(z, None)
        w = w_avg + (w - w_avg) * args.truncation_psi

        # Save latent
        if args.save_latent:
            ztxt_filename = '{}/generate_zcodes/{}.txt'.format(
                args.dst_dir,
                str(i).zfill(4))
            with open(ztxt_filename, 'w') as f:
                text_save(f, z)

        # Save dlatent
        if args.save_dlatent:
            wtxt_filename = '{}/generate_wcodes/{}.npy'.format(
                args.dst_dir,
                str(i).zfill(4))
            np.save(wtxt_filename, w[0])

        # Generate image

        images = Gs_network.components.synthesis.run(w, **Gs_syn_kwargs)

        # Save image
        PIL.Image.fromarray(images[0], 'RGB').save(
            dnnlib.make_run_dir_path('{}/images/{}.png'.format(
                args.dst_dir,
                str(i).zfill(4))))
Example #11
def generate_neighbors(network_pkl, seeds, npys, diameter, truncation_psi,
                       num_samples, save_vector, outdir):
    global _G, _D, Gs, noise_vars
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    with dnnlib.util.open_url(network_pkl) as fp:
        _G, _D, Gs = pickle.load(fp)

    os.makedirs(outdir, exist_ok=True)

    # Render images for dlatents initialized from random seeds.
    Gs_kwargs = {
        'output_transform':
        dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True),
        'randomize_noise':
        False,
        'truncation_psi':
        truncation_psi
    }

    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' %
              (seed, seed_idx + 1, len(seeds)))
        rnd = np.random.RandomState(seed)

        og_z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        tflib.set_vars(
            {var: rnd.randn(*var.shape.as_list())
             for var in noise_vars})  # [height, width]
        images = Gs.run(og_z, None,
                        **Gs_kwargs)  # [minibatch, height, width, channel]
        # PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
        PIL.Image.fromarray(images[0],
                            'RGB').save(f'{outdir}/seed{seed:05d}.png')

        zs = []
        z_prefix = 'seed%04d_neighbor' % seed

        for s in range(num_samples):
            random = np.random.uniform(-diameter, diameter, [1, 512])
            #             zs.append(np.clip((og_z+random),-1,1))
            new_z = np.clip(np.add(og_z, random), -1, 1)
            images = Gs.run(new_z, None,
                            **Gs_kwargs)  # [minibatch, height, width, channel]
            # PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('%s%04d.png' % (z_prefix,s)))
            PIL.Image.fromarray(images[0],
                                'RGB').save(f'{outdir}/{z_prefix}{s:05d}.png')
            # generate_latent_images(zs, truncation_psi, save_vector, z_prefix)
            if save_vector:
                np.save(f'{outdir}/{z_prefix}{s:05d}.npy', new_z)
Example #12
def make_latent_interp_animation(code1, code2, num_interps):
    step_size = 1.0 / num_interps
    amounts = np.arange(0, 1, step_size)
    count = 0
    for alpha in tqdm(amounts):
        interpolated_latent_code = linear_interpolate(code1, code2, alpha)
        images = generate_image_from_z(interpolated_latent_code)
        PIL.Image.fromarray(images[0], 'RGB').save(
            dnnlib.make_run_dir_path(f'{count}.png'))
        count += 1
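
linear_interpolate and generate_image_from_z are defined elsewhere; the interpolation itself is a convex combination of two latent codes. A standalone sketch with a plausible linear_interpolate:

import numpy as np

def linear_interpolate(code1, code2, alpha):
    # alpha=0 returns code1, alpha=1 returns code2.
    return code1 * (1 - alpha) + code2 * alpha

code1 = np.random.RandomState(1).randn(1, 512)
code2 = np.random.RandomState(2).randn(1, 512)
num_interps = 10
frames = [linear_interpolate(code1, code2, alpha)
          for alpha in np.arange(0, 1, 1.0 / num_interps)]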
Example #13
def create_look_alikes(network_pkl, w_vectors_file, seeds_file, start_distance,
                       end_distance, steps):
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg')  # [component]
    all_w = np.load(w_vectors_file)
    seeds = np.load(seeds_file)
    lookalikes = np.array([
        w_avg + (all_w - w_avg) * truncation_psi
        for truncation_psi in np.arange(start_distance, end_distance,
                                        (end_distance - start_distance) / steps)
    ])
    for seed, lookalike in zip(seeds, lookalikes):
        np.save(dnnlib.make_run_dir_path('seed%04d.npy' % seed), lookalike)
Example #14
def generate_images(network_pkl, seed_z, seeds, truncation_psi):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    # Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    rnd = np.random.RandomState(seed_z)
    z = rnd.randn(1, *Gs.input_shape[1:])
    img = []
    idx = [0, 2, 4, 6, 8, 10, 11]
    for layer_idx in idx:
        print('Generating image for %d (%d/%d) ...' %
              (layer_idx, layer_idx, len(noise_vars)))
        tflib.set_vars(
            {var: rnd.randn(*var.shape.as_list())
             for var in noise_vars})  # [height, width]
        zero_vars = noise_vars[layer_idx:]

        if len(zero_vars) != 0:
            tflib.set_vars({
                var: np.zeros(var.shape.as_list(), dtype=np.float32)
                for var in zero_vars
            })  # [height, width]
        images = Gs.run(z, None,
                        **Gs_kwargs)  # [minibatch, height, width, channel]
        img.append(images)
        # PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
        misc.convert_to_pil_image(images[0], drange=[-1, 1]).save(
            dnnlib.make_run_dir_path('seed%04d.png' % layer_idx))
    img = np.concatenate(img, 0)
    misc.save_image_grid(img,
                         dnnlib.make_run_dir_path('img.png'),
                         drange=[-1, 1],
                         grid_size=[7, 1])
Example #15
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        start_index, num_snapshots, save_vector):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    print('Num images: %d, Starting Index: %d' % (num_images, start_index))
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       verbose=True,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    img_filenames = None
    if dataset_obj._np_filenames is not None:
        assert num_images <= dataset_obj.filenames_size
        img_filenames = dataset_obj._np_filenames

    for image_idx in range(start_index, start_index + num_images):
        filename = img_filenames[
            image_idx] if img_filenames is not None else 'unknown'
        print('Projecting image %d/%d... (index: %d, filename: %s)' %
              (image_idx - start_index + 1, num_images, image_idx, filename))

        images, labels = dataset_obj.get_minibatch_np(1)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])

        project_image(proj,
                      targets=images,
                      labels=labels,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' %
                                                          image_idx),
                      num_snapshots=num_snapshots,
                      save_npy=save_vector,
                      npy_file_prefix=dnnlib.make_run_dir_path(filename))
        print(
            '✅ Finished projecting image %d/%d... (index: %d, filename: %s)' %
            (image_idx - start_index + 1, num_images, image_idx, filename))
Example #16
def generate_latent_images(zs, truncation_psi, save_npy, prefix):
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if not isinstance(truncation_psi, list):
        truncation_psi = [truncation_psi] * len(zs)

    for z_idx, z in enumerate(zs):
        if isinstance(z, list):
            z = np.array(z).reshape(1, 512)
        elif isinstance(z, np.ndarray):
            z = z.reshape(1, 512)
        print('Generating image for step %d/%d ...' % (z_idx, len(zs)))
        Gs_kwargs.truncation_psi = truncation_psi[z_idx]
        noise_rnd = np.random.RandomState(1)  # fix noise
        tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        images = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
        PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path('%s%05d.png' % (prefix, z_idx)))
        if save_npy:
            np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (prefix, z_idx)), z)
Example #17
def generate_attr_dataset(network_pkl, n_data_samples, start_seed,
                          resolution, run_batch, used_semantics_ls, attr2idx_dict,
                          create_new_G, new_func_name, truncation_psi=0.5):
    '''
    used_semantics_ls: ['azimuth', 'haircolor', ...]
    attr2idx_dict: {'azimuth': 10, 'haircolor': 17, 'smile': 6, ...}
    '''
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, I, Gs = misc.load_pkl(network_pkl)
    if create_new_G:
        Gs = Gs.convert(new_func_name=new_func_name)

    attr = {'names': used_semantics_ls}
    idxes = [attr2idx_dict[name] for name in used_semantics_ls]
    attr_ls = []
    for seed in range(start_seed, start_seed + n_data_samples, run_batch):
        rnd = np.random.RandomState(seed)
        if seed + run_batch >= start_seed + n_data_samples:
            b = start_seed + n_data_samples - seed
        else:
            b = run_batch
        Gs_kwargs = dnnlib.EasyDict(randomize_noise=True, minibatch_size=b, is_validation=True)
        # z = rnd.randn(b, *Gs.input_shape[1:]) # [minibatch, component]
        z = truncated_z_sample(b, Gs.input_shape[1], truncation=truncation_psi, seed=seed)
        images = get_return_v(Gs.run(z, None, **Gs_kwargs), 1) # [b, c, h, w]

        shrink = Gs.output_shape[-1] // resolution
        if shrink > 1:
            _, c, h, w = images.shape
            images = images.reshape(b, c, h // shrink, shrink, w // shrink, shrink).mean(5).mean(3)

        images = misc.adjust_dynamic_range(images, [-1, 1], [0, 255])
        images = np.transpose(images, [0, 2, 3, 1])
        images = np.rint(images).clip(0, 255).astype(np.uint8)
        for i in range(len(z)):
            PIL.Image.fromarray(images[i], 'RGB').save(dnnlib.make_run_dir_path('seed%07d.png' % (seed + i)))
        attr_ls.append(z[:, idxes])
    attr['data'] = np.concatenate(attr_ls, axis=0)
    with open(dnnlib.make_run_dir_path('attrs.pkl'), 'wb') as f:
        pickle.dump(attr, f)
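
The shrink step above downsamples NCHW images by block-averaging through a reshape. Isolated numpy sketch:

import numpy as np

b, c, h, w, shrink = 2, 3, 256, 256, 2
images = np.random.rand(b, c, h, w).astype(np.float32)

# Split each spatial axis into (new_size, shrink) blocks and average the block axes.
images = images.reshape(b, c, h // shrink, shrink, w // shrink, shrink).mean(5).mean(3)
assert images.shape == (b, c, h // shrink, w // shrink)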
Example #18
def generate_w_vectors(network_pkl, seeds, truncation_psi):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg')  # [component]

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                          nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False

    print('Generating W vectors...')
    all_z = np.stack([
        np.random.RandomState(seed).randn(*Gs.input_shape[1:])
        for seed in seeds
    ])  # [minibatch, component]
    all_w = Gs.components.mapping.run(all_z,
                                      None)  # [minibatch, layer, component]
    all_w = w_avg + (all_w -
                     w_avg) * truncation_psi  # [minibatch, layer, component]
    np.save(dnnlib.make_run_dir_path('w_vectors.npy'), all_w)
    np.save(dnnlib.make_run_dir_path('seeds.npy'), seeds)
Example #19
def generate_images_in_w_space(dlatents, truncation_psi, save_npy, prefix):
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi
    dlatent_avg = Gs.get_var('dlatent_avg')  # [component]

    # temp_dir = 'frames%06d'%int(1000000*random.random())
    # os.system('mkdir %s'%temp_dir)

    for row, dlatent in enumerate(dlatents):
        print('Generating image for step %d/%d ...' % (row, len(dlatents)))
        #row_dlatents = (dlatent[np.newaxis] - dlatent_avg) * np.reshape(truncation_psi, [-1, 1, 1]) + dlatent_avg
        dl = (dlatent - dlatent_avg) * truncation_psi + dlatent_avg
        row_images = Gs.components.synthesis.run(dl, **Gs_kwargs)
        PIL.Image.fromarray(row_images[0], 'RGB').save(
            dnnlib.make_run_dir_path('frame%05d.png' % row))
        if save_npy:
            np.save(dnnlib.make_run_dir_path('%s%05d.npy' % (prefix, row)),
                    dlatent)
Example #20
def convert_pkl(network_pkl, new_func_name_G, new_func_name_D, new_func_name_I):
    tflib.init_tf()
    print('Loading networks from "%s"...' % network_pkl)
    # _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    _G, _D, _I, _Gs = misc.load_pkl(network_pkl)
    Gs = _Gs.convert(new_func_name=new_func_name_G, synthesis_func='G_synthesis_modular_ps_sc')
    G = _G.convert(new_func_name=new_func_name_G, synthesis_func='G_synthesis_modular_ps_sc')
    D = _D.convert(new_func_name=new_func_name_D)
    I = _I.convert(new_func_name=new_func_name_I)

    misc.save_pkl((G, D, I, Gs),
                  dnnlib.make_run_dir_path('network-saved.pkl'))
Example #21
    def random_generate(self):

        with self.graph.as_default():
            with self.session.as_default():
                z = np.random.randn(
                    1,
                    *self.Gs_network.input_shape[1:])  # [minibatch, component]
                # Generate image
                images = self.Gs_network.run(
                    z, None, **
                    self.Gs_syn_kwargs)  # [minibatch, height, width, channel]
        PIL.Image.fromarray(images[0], 'RGB').save(
            dnnlib.make_run_dir_path('./static/img/random_face.jpg'))

def project_generated_images(network_pkl, seeds, num_snapshots, num_steps,
    truncation_psi, save_snapshots=False, save_latents=False, save_umap=False,
    save_tiles=False):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = num_steps
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi

    latents = np.zeros((len(seeds), Gs.input_shape[1]), dtype=np.float32)
    tiles = [None] * len(seeds)
    for seed_idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})
        images = Gs.run(z, None, **Gs_kwargs)
        tiles[seed_idx] = images[0, ...].transpose(1, 2, 0)
        latents[seed_idx, ...] = project_image(proj, targets=images,
            png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
            num_snapshots=num_snapshots, save_snapshots=save_snapshots)

        if save_latents:
            filename = dnnlib.make_run_dir_path('generated_image_latent_{:06d}'.format(seed_idx))
            np.save(filename, latents[seed_idx, ...])

    if save_latents:
        filename = dnnlib.make_run_dir_path('generated_image_latents.npy')
        np.save(filename, latents)

    if save_umap:
        reducer = umap.UMAP()
        embeddings = reducer.fit_transform(latents)
        filename = dnnlib.make_run_dir_path('generated_image_umap.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(embeddings.tolist(), f, ensure_ascii=False)

    if save_tiles:
        tiles_prefix = dnnlib.make_run_dir_path('generated_tile_solid')
        misc.save_texture_grid(tiles, tiles_prefix)

        textures_prefix = dnnlib.make_run_dir_path('generated_texture_solid')
        textures = [misc.make_white_square() for _ in range(len(tiles))]
        misc.save_texture_grid(textures, textures_prefix)

        filename = dnnlib.make_run_dir_path('labels.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump([0.0] * len(tiles), f, ensure_ascii=False)

def show_real_data(data_dir, dataset_name, number):
    tflib.init_tf()
    dataset_args = EasyDict(tfrecord_dir=dataset_name, max_label_size='full')
    training_set = dataset.load_dataset(data_dir=dnnlib.convert_path(data_dir),
                                        verbose=True,
                                        **dataset_args)
    gw = 1
    gh = 1
    for i in range(number):
        reals, _ = training_set.get_minibatch_np(gw * gh)
        misc.save_image_grid(reals,
                             dnnlib.make_run_dir_path('reals%04d.png' % (i)),
                             drange=training_set.dynamic_range,
                             grid_size=None)

def project_real_images(network_pkl, dataset_name, data_dir, num_images,
    num_snapshots, num_steps, save_snapshots=False, save_latents=False,
    save_umap=False, save_tiles=False):
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = num_steps

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0, repeat=False, shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    latents = np.zeros((num_images, Gs.input_shape[1]), dtype=np.float32)
    tiles = [None] * num_images
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        tiles[image_idx] = images[0, ...].transpose(1, 2, 0)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        latents[image_idx, ...] = project_image(proj, targets=images,
            png_prefix=dnnlib.make_run_dir_path('image%04d-' % image_idx),
            num_snapshots=num_snapshots, save_snapshots=save_snapshots)

        if save_latents:
            filename = dnnlib.make_run_dir_path('real_image_latent_{:06d}'.format(image_idx))
            np.save(filename, latents[image_idx, ...])


    if save_latents:
        filename = dnnlib.make_run_dir_path('real_image_latents.npy')
        np.save(filename, latents)

    if save_umap:
        reducer = umap.UMAP()
        embeddings = reducer.fit_transform(latents)
        filename = dnnlib.make_run_dir_path('real_image_umap.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(embeddings.tolist(), f, ensure_ascii=False)

    if save_tiles:
        tiles_prefix = dnnlib.make_run_dir_path('real_tile_solid')
        misc.save_texture_grid(tiles, tiles_prefix)

        textures_prefix = dnnlib.make_run_dir_path('real_texture_solid')
        textures = [misc.make_white_square() for _ in range(len(tiles))]
        misc.save_texture_grid(textures, textures_prefix)

        filename = dnnlib.make_run_dir_path('labels.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump([0.0] * len(tiles), f, ensure_ascii=False)
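
The save_umap branch reduces the collected latents to a 2-D embedding and stores it as JSON. A standalone sketch of that step (assumes the umap-learn package is installed; filename and data are illustrative):

import json
import numpy as np
import umap

latents = np.random.rand(50, 512).astype(np.float32)  # stand-in for projected latents

reducer = umap.UMAP()                    # defaults to a 2-D embedding
embeddings = reducer.fit_transform(latents)

with open('umap.json', 'w', encoding='utf-8') as f:
    json.dump(embeddings.tolist(), f, ensure_ascii=False)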
Example #25
def get_latents_for_seeds(submit_config, network_pkl, seeds):
    print('starting process of getting latents for seeds ' + str(seeds))

    tflib.init_tf({'rnd.np_random_seed': 1000})

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)

    for seed_idx, seed in enumerate(seeds):
        print('Saving latent for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        with open(dnnlib.make_run_dir_path(str(seed) + '.json'), 'w') as f:
            json.dump(z.tolist(), f)
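
The JSON round-trip for a z vector is straightforward; a minimal sketch (filename is illustrative):

import json
import numpy as np

z = np.random.RandomState(1000).randn(1, 512)
with open('1000.json', 'w') as f:
    json.dump(z.tolist(), f)

with open('1000.json') as f:
    z_back = np.array(json.load(f))
assert np.allclose(z, z_back)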
Example #26
 def move_latent(self, npy_dir, Gs_network, Gs_syn_kwargs, *args):
     latent_vector = np.load(npy_dir)[np.newaxis, :]
     smile, age, gender, beauty, angleh, anglep, raceblack, raceyellow, racewhite = args
     new_latent_vector = latent_vector.copy()
     new_latent_vector[0][:8] = (
         latent_vector[0] + smile * self.smile_drt + age * self.age_drt +
         gender * self.gender_drt + beauty * self.beauty_drt +
         angleh * self.angleh_drt + anglep * self.anglep_drt +
         raceblack * self.raceblack_drt + raceyellow * self.raceyellow_drt +
         racewhite * self.racewhite_drt)[:8]
     with self.graph.as_default():
         with self.session.as_default():
             images = Gs_network.components.synthesis.run(
                 new_latent_vector, **Gs_syn_kwargs)
     PIL.Image.fromarray(images[0], 'RGB').save(
         dnnlib.make_run_dir_path('./static/img/edit_face.jpg'))
Example #27
    def generate_image(self):
        seed = int(self.seed_text_box.text())
        # seed = int(datetime.now().timestamp())
        rnd = np.random.RandomState(seed)
        # z = rnd.randn(1, *self.Gs.input_shape[1:])  # [minibatch, component]
        z = self.get_z_vector()

        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in self.noise_vars})  # [height, width]

        w = self.get_w_vector()
        images = self.Gs.run(z, None, **self.Gs_kwargs)  # [minibatch, height, width, channel]

        img = PIL.Image.fromarray(images[0], 'RGB')
        img.save(dnnlib.make_run_dir_path('seed%04d.png' % seed))
        self.set_image(img)

def generate_grids(network,
                   seeds,
                   latent_pair,
                   n_samples_per=10,
                   bound=2,
                   rot=0,
                   load_gan=False):
    tflib.init_tf()
    print('Loading networks from "%s"...' % network)
    if load_gan:
        _G, _D, I, G = misc.load_pkl(network)
    else:
        E, G = get_return_v(misc.load_pkl(network), 2)

    G_kwargs = dnnlib.EasyDict()
    G_kwargs.is_validation = True
    G_kwargs.randomize_noise = True
    G_kwargs.minibatch_size = 8

    distance_measure = misc.load_pkl(
        'http://d36zk2xti64re0.cloudfront.net/stylegan1/networks/metrics/vgg16_zhang_perceptual.pkl'
    )

    distance_ls = []
    for seed_idx, seed in enumerate(seeds):
        print('Generating images for seed %d (%d/%d) ...' %
              (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = sample_grid_z(rnd, G, latent_pair, n_samples_per, bound, rot)
        images = get_return_v(
            G.run(z, None, **G_kwargs),
            1)  # [n_samples_per*n_samples_per, channel, height, width]

        distance_ls.append(
            measure_distance(images, n_samples_per, distance_measure))

        images = add_outline(images, width=1)
        n_samples_square, c, h, w = np.shape(images)
        assert n_samples_square == n_samples_per * n_samples_per
        images = np.reshape(images, (n_samples_per, n_samples_per, c, h, w))
        images = np.transpose(images, [0, 3, 1, 4, 2])
        images = np.reshape(images, (n_samples_per * h, n_samples_per * w, c))
        images = misc.adjust_dynamic_range(images, [0, 1], [0, 255])
        images = np.rint(images).clip(0, 255).astype(np.uint8)
        PIL.Image.fromarray(images, 'RGB').save(
            dnnlib.make_run_dir_path('seed%04d.png' % seed))
    print('mean_distance:', np.mean(np.array(distance_ls)))
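
The grid assembly at the end of generate_grids is pure numpy: a batch of [N*N, C, H, W] images is rearranged into one [N*H, N*W, C] canvas. Isolated sketch with random data (output filename is illustrative):

import numpy as np
import PIL.Image

n, c, h, w = 4, 3, 64, 64
images = np.random.rand(n * n, c, h, w)       # [n*n, channel, height, width] in [0, 1]

grid = images.reshape(n, n, c, h, w)
grid = np.transpose(grid, [0, 3, 1, 4, 2])    # [row, height, col, width, channel]
grid = grid.reshape(n * h, n * w, c)

grid = np.rint(grid * 255).clip(0, 255).astype(np.uint8)
PIL.Image.fromarray(grid, 'RGB').save('grid_example.png')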
Example #29
def project_real_other_images(network_pkl, data_dir, num_snapshots, create_new_G, new_func_name):
    print('Loading networks from "%s"...' % network_pkl)
    tflib.init_tf()
    # _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    _G, _D, I, Gs = misc.load_pkl(network_pkl)
    proj = projector_vc2.ProjectorVC2()
    proj.set_network(Gs, create_new_G, new_func_name)

    img_paths = glob.glob(os.path.join(data_dir, '*'))
    num_images = len(img_paths)
    for image_idx, img_path in enumerate(img_paths):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images = np.array(Image.open(img_path).convert('RGB'))
        images = np.transpose(images, (2, 0, 1))
        images = np.reshape(images, [1]+list(images.shape))
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        project_image(proj, targets=images, I_net=I, png_prefix=dnnlib.make_run_dir_path('image%04d-' % image_idx), num_snapshots=num_snapshots)
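
The preprocessing above takes an RGB image to an NCHW float batch in [-1, 1]. A standalone sketch using a synthetic image in place of a file on disk:

import numpy as np
import PIL.Image

img = PIL.Image.new('RGB', (256, 256), (127, 127, 127))  # stand-in for Image.open(path).convert('RGB')

images = np.array(img)                                   # [height, width, channel], uint8
images = np.transpose(images, (2, 0, 1))                 # [channel, height, width]
images = np.reshape(images, [1] + list(images.shape))    # add the minibatch axis
images = images.astype(np.float32) / 255.0 * 2.0 - 1.0   # [0, 255] -> [-1, 1]
assert images.shape == (1, 3, 256, 256)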
Example #30
def generate_mutated_grid(submit_config,
                          network_pkl,
                          truncation_psi,
                          latents_file,
                          minibatch_size=4):
    print('starting process of generating grid of variants of ' + latents_file)

    tflib.init_tf({'rnd.np_random_seed': 1000})

    grid_size = (128, 1)
    grid_labels = []

    with open(latents_file, 'r') as f:
        original_latents = np.array(json.load(f))
    print('loaded original latents from ' + latents_file)

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg')  # [component]

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size

    all_latents = []
    ltnts = original_latents[0]
    print('Generating W vectors...')
    for i in range(grid_size[0] * grid_size[1]):
        ltnts = mutate_latents(ltnts, 4)
        all_latents.append(ltnts)
    all_z = np.stack(all_latents)
    #    all_z = np.stack([mutate_latents(original_latents[0], i) for i in range(grid_size[0]*grid_size[1])])
    all_w = Gs.components.mapping.run(all_z,
                                      None)  # [minibatch, layer, component]
    all_w = w_avg + (all_w -
                     w_avg) * truncation_psi  # [minibatch, layer, component]

    print('Generating images...')
    all_images = Gs.components.synthesis.run(
        all_w, **Gs_syn_kwargs)  # [minibatch, height, width, channel]
    misc.save_image_grid(all_images,
                         dnnlib.make_run_dir_path('latentmod-1.png'),
                         drange=[-1, 1],
                         grid_size=grid_size)
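
mutate_latents is defined elsewhere; below is a hypothetical stand-in (perturb a few randomly chosen coordinates with Gaussian noise), included only so the mutation loop can be run on its own:

import numpy as np

def mutate_latents(latents, n_mutations, scale=0.5):
    # Hypothetical stand-in: nudge n_mutations random coordinates of the latent.
    mutated = np.array(latents, dtype=np.float32)
    idx = np.random.choice(mutated.shape[-1], size=n_mutations, replace=False)
    mutated[..., idx] += scale * np.random.randn(n_mutations)
    return mutated

ltnts = np.random.randn(512)
all_latents = []
for _ in range(8):
    ltnts = mutate_latents(ltnts, 4)
    all_latents.append(ltnts)
all_z = np.stack(all_latents)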