Example #1
# StyleGAN2 repo modules; `Gs` (the pre-trained generator), the `files` list of
# image names, and the `save_latent` helper are assumed to be defined earlier
# in the surrounding script/notebook.
import dnnlib
import projector
import run_projector
import training.dataset
import training.misc


def project_real_images(dataset_name, data_dir, num_images, num_snapshots, num_steps=1000):
    proj = projector.Projector(num_steps)
    proj.set_network(Gs)
    print('Loading images from "%s"...' % dataset_name)

    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    latents = []
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx + 1, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 pixels from [0, 255] to the [-1, 1] range the generator expects
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        run_projector.project_image(proj,
                                    targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(
                                        '/tmp/%s-' % files[image_idx]),
                                    num_snapshots=num_snapshots)
        # Save a temporary copy of the latent (e.g. to Google Drive)
        save_latent(proj.get_dlatents(), '/latents/%s' % files[image_idx])
        # Collect the latent for this image
        latents.append(proj.get_dlatents())

    # Return the latents of all projected images
    return latents
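A minimal, hypothetical driver for Example #1. The snippet itself never defines Gs, files, or save_latent, so this sketch loads the generator from a pickle (the same scheme Example #4 uses) and stubs the rest; every name and path below is an assumption, not part of the original code.

# Hypothetical call site for Example #1 -- all names and paths are illustrative.
import os
import pickle
import numpy as np
import dnnlib.tflib as tflib

tflib.init_tf()
with open('network-snapshot.pkl', 'rb') as f:        # assumed pickle path
    _G, _D, Gs = pickle.load(f, encoding='latin1')   # same loading scheme as Example #4

files = sorted(os.listdir('raw_images'))             # assumed per-image name prefixes

def save_latent(dlatents, path):                     # stub for the undefined helper
    np.save(path + '.npy', dlatents)

latents = project_real_images(dataset_name='my_dataset',   # tfrecord dir name (assumed)
                              data_dir='datasets',
                              num_images=len(files),
                              num_snapshots=5)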
Example #2
def project_real_images(dataset_name, data_dir, num_images, num_snapshots):
    # Uses the same StyleGAN2 imports and pre-loaded generator `Gs` as Example #1
    proj = projector.Projector()
    #proj.verbose = True
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx + 1, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        print("will run projector")
        run_projector.project_image(
            proj,
            targets=images,
            png_prefix=dnnlib.make_run_dir_path('projection/out/image%04d-' %
                                                image_idx),
            num_snapshots=num_snapshots)
        print("done running projector")
Example #3
def project_images(Gs,
                   images_dir,
                   tfrecord_dir,
                   data_dir,
                   num_snapshots,
                   pure_projector=False):
    """Project every image in `images_dir` into the latent space of `Gs`.

    `tfrecord_dir` and `data_dir` are expected to be pathlib.Path objects.
    Assumes the StyleGAN2 repo modules (projector, run_projector, dataset_tool,
    training.dataset, training.misc, dnnlib) and tqdm are already imported.
    (`pure_projector` is accepted but unused in this snippet.)
    """
    # Set up the projector
    print('Setting up projector')
    proj = projector.Projector()
    proj.set_network(Gs)

    # generate tfrecords
    nb_images = dataset_tool.create_from_images(str(tfrecord_dir),
                                                str(images_dir), True)

    # loading images from tfrecords
    dataset_obj = training.dataset.load_dataset(tfrecord_dir=tfrecord_dir,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    # project all loaded images
    print('=======================')
    for image_idx in tqdm(range(nb_images)):
        print(f'Projecting image {image_idx + 1}/{nb_images}')

        images, _labels = dataset_obj.get_minibatch_np(1)
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])

        run_path = data_dir / f'out_{image_idx}'
        run_path.mkdir()
        run_projector.project_image(proj,
                                    targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(
                                        str(run_path / 'image_')),
                                    num_snapshots=num_snapshots)
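Example #3 expects an in-memory generator and pathlib.Path arguments, and it builds the TFRecords itself from a folder of images. A hedged sketch of how it might be driven; the pickle name and every path below are assumptions:

# Hypothetical call site for Example #3 -- pickle name and paths are placeholders.
import pickle
from pathlib import Path
import dnnlib.tflib as tflib

tflib.init_tf()
with open('network-snapshot.pkl', 'rb') as f:
    _G, _D, Gs = pickle.load(f, encoding='latin1')

out_dir = Path('projection_runs')
out_dir.mkdir(exist_ok=True)
project_images(Gs,
               images_dir=Path('aligned_images'),
               tfrecord_dir=Path('datasets/projection_records'),
               data_dir=out_dir,
               num_snapshots=5)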
Example #4
# StyleGAN2 repo modules; the global `img_path` (output PNG prefix) is assumed
# to be defined elsewhere in the surrounding script.
import pickle
import dnnlib
import dnnlib.tflib as tflib
import projector
import run_projector
import training.dataset
import training.misc


def project_real_images(dataset_name, data_dir, num_images, num_snapshots, model_pkl, steps=1000):
    # Load the pre-trained networks from the pickle
    tflib.init_tf()
    with open(model_pkl, 'rb') as stream:
        G, D, Gs = pickle.load(stream, encoding='latin1')

    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = steps

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    print(dataset_obj.shape)
    print(Gs.output_shape)
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx + 1, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 pixels from [0, 255] to the [-1, 1] range the generator expects
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        run_projector.project_image(proj,
                                    targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(f'{img_path}image%04d-' % image_idx),
                                    num_snapshots=num_snapshots)
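Unlike the other examples, Example #4 loads the pickle itself, but it still reads a global img_path as the PNG prefix. A hedged invocation might look like the following; the dataset name, paths, and counts are placeholders:

# Hypothetical call site for Example #4 -- dataset name, paths, and counts are placeholders.
img_path = 'projection/out/'          # global consumed inside project_real_images
project_real_images(dataset_name='my_dataset',
                    data_dir='datasets',
                    num_images=10,
                    num_snapshots=5,
                    model_pkl='network-snapshot.pkl',
                    steps=1000)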