Beispiel #1
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Project `num_images` real images from a TFRecord dataset into the
    StyleGAN2 latent space.

    Args:
        network_pkl: Path/URL of the pickled (G, D, Gs) networks.
        dataset_name: TFRecord directory name inside `data_dir`.
        data_dir: Root directory of the datasets.
        num_images: Number of images to pull from the dataset and project.
        num_snapshots: Number of intermediate snapshot PNGs per image.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    # Dataset images must match the generator's output shape (C, H, W).
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 [0, 255] pixels into the generator's [-1, 1] range.
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        # NOTE: removed leftover debug code that dumped the raw batch with
        # cv2.imwrite -- cv2 expects an HxWxC uint8 array, but `images` is a
        # 4D NCHW float array in [-1, 1], so the written file was garbage
        # (and the per-iteration import/write slowed the loop).
        project_image(proj,
                      targets=images,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' %
                                                          image_idx),
                      num_snapshots=num_snapshots)
Beispiel #2
0
def project_generated_images(network_pkl, seeds, num_snapshots,
                             truncation_psi):
    """Synthesize one image per seed and project each back into the
    StyleGAN2 latent space, writing per-seed snapshot PNGs."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    # Collect the per-layer noise inputs of the synthesis network.
    noise_vars = []
    for name, var in Gs.components.synthesis.vars.items():
        if name.startswith('noise'):
            noise_vars.append(var)

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi

    total = len(seeds)
    for seed_idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, total))
        rnd = np.random.RandomState(seed)
        # Draw z first, then the noise values, so the RNG stream matches.
        z = rnd.randn(1, *Gs.input_shape[1:])
        noise_values = {var: rnd.randn(*var.shape.as_list())
                        for var in noise_vars}
        tflib.set_vars(noise_values)
        images = Gs.run(z, None, **Gs_kwargs)
        project_image(proj,
                      targets=images,
                      labels=None,
                      png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
                      num_snapshots=num_snapshots,
                      save_npy=False,
                      npy_file_prefix='NONAME')
def project_real_images(dataset_name, data_dir, num_images, num_snapshots):
    """Project `num_images` dataset images into the latent space.

    NOTE(review): relies on a module/notebook-level global `Gs` generator --
    it is not loaded in this function; confirm the caller defines it first.
    """
    proj = projector.Projector()
    #proj.verbose = True
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    # Dataset images must match the generator's output shape (C, H, W).
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 [0, 255] pixels into the generator's [-1, 1] range.
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        print("will run projector")
        run_projector.project_image(
            proj,
            targets=images,
            png_prefix=dnnlib.make_run_dir_path('projection/out/image%04d-' %
                                                image_idx),
            num_snapshots=num_snapshots)
        print("done running projector")
Beispiel #4
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Load a pickled StyleGAN2 and project real dataset images into its
    latent space, writing per-image snapshot PNGs."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    expected_shape = Gs.output_shape[1:]
    # Col.RB / Col.AU wrap the failure message in ANSI color escape codes.
    assert dataset_obj.shape == expected_shape, (
        "%sexpected shape %s, got %s%s" %
        (dnnlib.util.Col.RB, expected_shape, dataset_obj.shape,
         dnnlib.util.Col.AU))

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        prefix = dnnlib.make_run_dir_path('image%04d-' % image_idx)
        project_image(proj, targets=images, png_prefix=prefix,
                      num_snapshots=num_snapshots)
Beispiel #5
0
def project_generated_images(network_pkl, seeds, num_snapshots, num_steps,
                             truncation_psi, save_target_dlatent,
                             save_every_dlatent, save_final_dlatent):
    """Generate images per seed via the mapping/synthesis networks and
    project them back, optionally saving target and intermediate dlatents."""
    assert num_snapshots <= num_steps, "Can't have more snapshots than number of steps taken!"
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(num_steps=num_steps)
    proj.set_network(Gs)

    # Collect the per-layer noise inputs of the synthesis network.
    noise_vars = []
    for name, var in Gs.components.synthesis.vars.items():
        if name.startswith('noise'):
            noise_vars.append(var)

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi

    n_seeds = len(seeds)
    for seed_idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, n_seeds))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        noise_values = {var: rnd.randn(*var.shape.as_list())
                        for var in noise_vars}
        tflib.set_vars(noise_values)
        # Run mapping explicitly so the target dlatent (w) can be saved.
        w = Gs.components.mapping.run(z, None)
        if save_target_dlatent:
            np.save(dnnlib.make_run_dir_path('seed%04d.npy' % seed), w)
        images = Gs.components.synthesis.run(w, **Gs_kwargs)
        project_image(proj,
                      targets=images,
                      png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
                      num_snapshots=num_snapshots,
                      save_every_dlatent=save_every_dlatent,
                      save_final_dlatent=save_final_dlatent)
Beispiel #6
0
def main():
    """Align faces found in ./raw_images, then project each aligned face
    into the StyleGAN2 latent space, writing results to ./generated_images.
    """
    # Align 1024x1024 face images using the dlib 68-landmark shape predictor.
    landmarks_detector = LandmarksDetector(
        'models/shape_predictor_68_face_landmarks.dat')
    # Create the output directory once, up front (it used to be created
    # inside the inner loop on every detected face; hoisting also ensures
    # the directory exists even when no faces are found).
    os.makedirs('aligned_images', exist_ok=True)
    for img_name in [f for f in os.listdir('raw_images') if f[0] not in '._']:
        raw_img_path = os.path.join('raw_images', img_name)
        for i, face_landmarks in enumerate(
                landmarks_detector.get_landmarks(raw_img_path), start=1):
            face_img_name = '%s_%02d.png' % (os.path.splitext(img_name)[0], i)
            aligned_face_path = os.path.join('aligned_images', face_img_name)
            image_align(raw_img_path, aligned_face_path, face_landmarks)

    # Load the model
    network_pkl = "results/00005-stylegan2-500_128_passport-1gpu-config-f/network-snapshot-000361.pkl"
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(vgg16_pkl="models/vgg16_zhang_perceptual.pkl",
                               num_steps=1000,
                               initial_learning_rate=0.1,
                               initial_noise_factor=0.05,
                               verbose=False)
    proj.set_network(Gs)
    # Skip hidden/underscore-prefixed entries.
    src_files = sorted([
        os.path.join('aligned_images', f) for f in os.listdir('aligned_images')
        if f[0] not in '._'
    ])
    for src_file in src_files:
        project_image(proj, src_file, 'generated_images', '.stylegan2-tmp')
        # Remove the per-image temporary working directory.
        shutil.rmtree('.stylegan2-tmp')
Beispiel #7
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Project images read from a hard-coded directory into latent space.

    NOTE(review): despite the signature, `num_images` is ignored -- the loop
    runs over every image returned by read_images(). The TFRecord dataset is
    loaded only for the shape assertion, not as the image source.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    # Hard-coded image directory; presumably the images are already in the
    # generator's [-1, 1] range, since adjust_dynamic_range below is
    # commented out -- TODO confirm against read_images().
    imgs = read_images('/gdata2/fengrl/imgs-for-embed')

    for image_idx in range(len(imgs)):
        print('Projecting image %d/%d ...' % (image_idx, len(imgs)))
        # images, _labels = dataset_obj.get_minibatch_np(1)
        images = np.expand_dims(imgs[image_idx], 0)
        # images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        project_image(proj,
                      targets=images,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' %
                                                          image_idx),
                      num_snapshots=num_snapshots)
Beispiel #8
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_steps, num_snapshots, save_every_dlatent,
                        save_final_dlatent):
    """Project real dataset images into latent space; exits with an error
    message if the dataset runs out before `num_images` are processed."""
    assert num_snapshots <= num_steps, "Can't have more snapshots than number of steps taken!"
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(num_steps=num_steps)
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    # Dataset images must match the generator's output shape (C, H, W).
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        try:
            images, _labels = dataset_obj.get_minibatch_np(1)
            images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
            prefix = dnnlib.make_run_dir_path('image%04d-' % image_idx)
            project_image(proj,
                          targets=images,
                          png_prefix=prefix,
                          num_snapshots=num_snapshots,
                          save_every_dlatent=save_every_dlatent,
                          save_final_dlatent=save_final_dlatent)
        except tf.errors.OutOfRangeError:
            # The dataset was exhausted before reaching num_images.
            print(
                f'Error! There are only {image_idx} images in {data_dir}{dataset_name}!'
            )
            sys.exit(1)
Beispiel #9
0
def project_real_images(submit_config, network_pkl, dataset_name, data_dir,
                        num_images, num_snapshots):
    """Project real dataset images into latent space; projector verbosity
    follows the submit_config flag."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.verbose = submit_config.verbose
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    # Log both shapes before asserting so a mismatch is easy to diagnose.
    print('dso shape: ' + str(dataset_obj.shape) + ' vs gs shape: ' +
          str(Gs.output_shape[1:]))
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        prefix = dnnlib.make_run_dir_path('image%04d-' % image_idx)
        project_image(proj, targets=images, png_prefix=prefix,
                      num_snapshots=num_snapshots)
Beispiel #10
0
def project_real_images(dataset_name, data_dir, num_images, num_snapshots):
    """Project `num_images` dataset images and return all resulting dlatents.

    NOTE(review): depends on names not defined in this function --
    `num_steps`, `Gs`, `files`, and `save_latent` must exist at
    module/notebook scope; confirm they are set up before calling.
    """
    proj = projector.Projector(num_steps)
    proj.set_network(Gs)
    print('Loading images from "%s"...' % dataset_name)

    dataset_obj = training.dataset.load_dataset(data_dir=data_dir,
                                                tfrecord_dir=dataset_name,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    # Dataset images must match the generator's output shape (C, H, W).
    assert dataset_obj.shape == Gs.output_shape[1:]
    latents = []
    # `row` always equals `image_idx` here; both index into `files`.
    for row, image_idx in enumerate(range(num_images)):
        print('Projecting image %d/%d ...' % ((image_idx + 1), num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 [0, 255] pixels into the generator's [-1, 1] range.
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        run_projector.project_image(proj,
                                    targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(
                                        '/tmp/%s-' % files[image_idx]),
                                    num_snapshots=num_snapshots)
        # Save tmp copy in Google Drive
        save_latent(proj.get_dlatents(), '/latents/%s' % files[row])
        # Add to array
        latents.append(proj.get_dlatents())

    # Returns all latents projected
    return latents
Beispiel #11
0
def project(src, dst, iters):
    """Project every image under args.src_dir into StyleGAN2 latent space.

    Args:
        src: Source value stored onto the Arguments object as args.src.
        dst: Destination value stored onto args.dst.
        iters: Number of projector optimization steps.

    NOTE(review): the body reads args.src_dir / args.dst_dir while only
    args.src / args.dst are set here -- presumably the Arguments class
    derives the *_dir attributes; confirm against its definition.
    """
    # Removed a stale `global NUM_ITERS` declaration: NUM_ITERS was never
    # read or assigned in this function, so the statement was dead code.
    args = Arguments()
    args.src = src
    args.dst = dst
    args.num_steps = iters

    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl=args.vgg16_pkl,
        num_steps=args.num_steps,
        initial_learning_rate=args.initial_learning_rate,
        initial_noise_factor=args.initial_noise_factor,
        verbose=args.verbose)
    proj.set_network(Gs)

    # Skip hidden/underscore-prefixed files.
    src_files = sorted([
        os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir)
        if f[0] not in '._'
    ])
    for src_file in src_files:
        project_image(proj,
                      src_file,
                      args.dst_dir,
                      args.tmp_dir,
                      video=args.video)
        if args.video:
            render_video(src_file, args.dst_dir, args.tmp_dir, args.num_steps,
                         args.video_mode, args.video_size, args.video_fps,
                         args.video_codec, args.video_bitrate)
        # Remove per-image temporary files (tfrecords / video frames).
        shutil.rmtree(args.tmp_dir)
Beispiel #12
0
def project_file(src_file, dst_dir, video_or_not=False, tmp_dir='./stylegan2-tmp'):
    """Project a single image file into the StyleGAN2 (FFHQ config-f)
    latent space, optionally rendering the optimization as a video."""
    _G, _D, Gs = pretrained_networks.load_networks(
        'gdrive:networks/stylegan2-ffhq-config-f.pkl')
    # Build the projector; verbose output mirrors the video flag.
    proj = projector.Projector(
        vgg16_pkl='.\models\\vgg16_zhang_perceptual.pkl',
        num_steps=1000,
        initial_learning_rate=0.1,
        initial_noise_factor=0.05,
        verbose=video_or_not)
    # Attach the StyleGAN2 generator to the projector.
    proj.set_network(Gs)

    project_image(proj, src_file, dst_dir, tmp_dir, video=video_or_not)

    # If requested, render the per-step snapshot frames into a video
    # (mode 1, 1024 px, 25 fps, libx264 @ 5M, 1000 steps).
    if video_or_not:
        render_video(src_file, dst_dir, tmp_dir, 1000, 1,
                     1024, 25, 'libx264', '5M')
    # Remove the temporary working directory.
    shutil.rmtree(tmp_dir)
def project_generated_images(network_pkl, seeds, num_snapshots, num_steps,
    truncation_psi, save_snapshots=False, save_latents=False, save_umap=False,
    save_tiles=False):
    """Generate one image per seed, project each back into latent space, and
    optionally save latents, a UMAP embedding, and texture tiles.

    Bug fix: the original body referenced the undefined names `num_images`
    and `image_idx` (copy-pasted from the real-image variant), which raised
    NameError; they are replaced by `len(seeds)` / `seed_idx`.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = num_steps
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.randomize_noise = False
    Gs_kwargs.truncation_psi = truncation_psi

    latents = np.zeros((len(seeds), Gs.input_shape[1]), dtype=np.float32)
    tiles = [None] * len(seeds)  # was: num_images (undefined in this scope)
    for seed_idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})
        images = Gs.run(z, None, **Gs_kwargs)
        tiles[seed_idx] = images[0, ...].transpose(1, 2, 0)  # was: image_idx
        latents[seed_idx, ...] = project_image(proj, targets=images,
            png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
            num_snapshots=num_snapshots, save_snapshots=save_snapshots)

        if save_latents:
            # was: image_idx (undefined) in both the filename and the index
            filename = dnnlib.make_run_dir_path('generated_image_latent_{:06d}'.format(seed_idx))
            np.save(filename, latents[seed_idx, ...])

    if save_latents:
        filename = dnnlib.make_run_dir_path('generated_image_latents.npy')
        np.save(filename, latents)

    if save_umap:
        # 2D embedding of all projected latents.
        reducer = umap.UMAP()
        embeddings = reducer.fit_transform(latents)
        filename = dnnlib.make_run_dir_path('generated_image_umap.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(embeddings.tolist(), f, ensure_ascii=False)

    if save_tiles:
        tiles_prefix = dnnlib.make_run_dir_path('generated_tile_solid')
        misc.save_texture_grid(tiles, tiles_prefix)

        textures_prefix = dnnlib.make_run_dir_path('generated_texture_solid')
        textures = [misc.make_white_square() for _ in range(len(tiles))]
        misc.save_texture_grid(textures, textures_prefix)

        filename = dnnlib.make_run_dir_path('labels.json')
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump([0.0] * len(tiles), f, ensure_ascii=False)
Beispiel #14
0
def main():
    """CLI entry point: project a batch slice of aligned real images into
    StyleGAN2 latent space, optionally rendering optimization videos."""

    def str2bool(v):
        # Fix for the classic argparse pitfall: with `type=bool`, ANY
        # non-empty string -- including "False" -- parsed as True. This
        # converter keeps the value-taking CLI but parses it correctly.
        if isinstance(v, bool):
            return v
        return v.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser(description='Project real-world images into StyleGAN2 latent space')
    parser.add_argument('src_dir', help='Directory with aligned images for projection')
    parser.add_argument('dst_dir', help='Output directory')
    parser.add_argument('start_batch', type=int, help = "For smaller batches")
    parser.add_argument('end_batch', type=int, help = "For smaller batches")
    parser.add_argument('--tmp-dir', default='.stylegan2-tmp', help='Temporary directory for tfrecords and video frames')
    parser.add_argument('--network-pkl', default='gdrive:networks/stylegan2-ffhq-config-f.pkl', help='StyleGAN2 network pickle filename')
    parser.add_argument('--vgg16-pkl', default='https://nvlabs-fi-cdn.nvidia.com/stylegan/networks/metrics/vgg16_zhang_perceptual.pkl', help='VGG16 network pickle filename')
    parser.add_argument('--num-steps', type=int, default=1000, help='Number of optimization steps')
    parser.add_argument('--initial-learning-rate', type=float, default=0.1, help='Initial learning rate')
    parser.add_argument('--initial-noise-factor', type=float, default=0.05, help='Initial noise factor')
    # was type=bool: '--verbose False' silently evaluated to True
    parser.add_argument('--verbose', type=str2bool, default=False, help='Verbose output')
    tiled_parser = parser.add_mutually_exclusive_group(required=False)
    tiled_parser.add_argument('--tiled', dest='tiled', action='store_true', help='Tiled dlatents (default)')
    tiled_parser.add_argument('--no-tiled', dest='tiled', action='store_false', help='Non-tiled dlatents')
    parser.set_defaults(tiled=True)
    # was type=bool: same pitfall as --verbose
    parser.add_argument('--video', type=str2bool, default=False, help='Render video of the optimization process')
    parser.add_argument('--video-mode', type=int, default=1, help='Video mode: 1 for optimization only, 2 for source + optimization')
    parser.add_argument('--video-size', type=int, default=1024, help='Video size (height in px)')
    parser.add_argument('--video-fps', type=int, default=25, help='Video framerate')
    parser.add_argument('--video-codec', default='libx264', help='Video codec')
    parser.add_argument('--video-bitrate', default='5M', help='Video bitrate')
    args = parser.parse_args()

    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl             = args.vgg16_pkl,
        num_steps             = args.num_steps,
        initial_learning_rate = args.initial_learning_rate,
        initial_noise_factor  = args.initial_noise_factor,
        verbose               = args.verbose,
        tiled                 = args.tiled
    )
    proj.set_network(Gs)

    # Skip hidden/underscore-prefixed files, then take the requested slice.
    src_files = sorted([os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir) if f[0] not in '._'])
    src_files = src_files[args.start_batch:args.end_batch]
    for src_file in src_files:
        project_image(proj, src_file, args.dst_dir, args.tmp_dir, video=args.video)
        if args.video:
            render_video(
                src_file, args.dst_dir, args.tmp_dir, args.num_steps, args.video_mode,
                args.video_size, args.video_fps, args.video_codec, args.video_bitrate
            )
        # Remove per-image temporary files (tfrecords / video frames).
        shutil.rmtree(args.tmp_dir)
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
    num_snapshots, num_steps, save_snapshots=False, save_latents=False,
    save_umap=False, save_tiles=False):
    """Project real dataset images into latent space, optionally saving
    per-image latents, a UMAP embedding, and texture tile grids."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = num_steps

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    latents = np.zeros((num_images, Gs.input_shape[1]), dtype=np.float32)
    tiles = [None] * num_images
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Keep an HWC copy of the raw image for the tile grid.
        tiles[image_idx] = images[0, ...].transpose(1, 2, 0)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        latents[image_idx, ...] = project_image(
            proj, targets=images,
            png_prefix=dnnlib.make_run_dir_path('image%04d-' % image_idx),
            num_snapshots=num_snapshots, save_snapshots=save_snapshots)

        if save_latents:
            np.save(
                dnnlib.make_run_dir_path(
                    'real_image_latent_{:06d}'.format(image_idx)),
                latents[image_idx, ...])

    if save_latents:
        np.save(dnnlib.make_run_dir_path('real_image_latents.npy'), latents)

    if save_umap:
        # 2D embedding of all projected latents.
        embeddings = umap.UMAP().fit_transform(latents)
        with open(dnnlib.make_run_dir_path('real_image_umap.json'), 'w',
                  encoding='utf-8') as f:
            json.dump(embeddings.tolist(), f, ensure_ascii=False)

    if save_tiles:
        misc.save_texture_grid(
            tiles, dnnlib.make_run_dir_path('real_tile_solid'))

        textures = [misc.make_white_square() for _ in range(len(tiles))]
        misc.save_texture_grid(
            textures, dnnlib.make_run_dir_path('real_texture_solid'))

        with open(dnnlib.make_run_dir_path('labels.json'), 'w',
                  encoding='utf-8') as f:
            json.dump([0.0] * len(tiles), f, ensure_ascii=False)
Beispiel #16
0
def get_projected_real_images(dataset_name, data_dir, num_images, num_snapshots, num_steps, _Gs):
    """Project `num_images` dataset images and return the projector output
    for the LAST image only (each loop iteration overwrites `out`).

    Fix: `out` is now initialized, so num_images == 0 returns None instead
    of raising UnboundLocalError at the return statement.
    """
    proj = projector.Projector()
    proj.set_network(_Gs)
    proj.num_steps = num_steps

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0, repeat=False, shuffle_mb=0)
    # Dataset images must match the generator's output shape (C, H, W).
    assert dataset_obj.shape == _Gs.output_shape[1:]

    out = None  # defined even when the loop body never runs
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 [0, 255] pixels into the generator's [-1, 1] range.
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        out = run_projector.get_projected_images(proj, targets=images, num_snapshots=num_snapshots)

    return out
Beispiel #17
0
def main():
    """CLI entry point: project real-world images into StyleGAN2 latent
    space, optionally rendering the optimization process as a video."""

    def str2bool(v):
        # Fix for the classic argparse pitfall: with `type=bool`, ANY
        # non-empty string -- including "False" -- parsed as True. This
        # converter keeps the value-taking CLI but parses it correctly.
        if isinstance(v, bool):
            return v
        return v.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description='Project real-world images into StyleGAN2 latent space')
    parser.add_argument('src_dir', help='Directory with aligned images for projection')
    parser.add_argument('dst_dir', help='Output directory')
    parser.add_argument('--tmp-dir', default='./stylegan2-tmp', help='Temporary directory for tfrecords and video frames')
    parser.add_argument('--network-pkl', default='gdrive:networks/stylegan2-ffhq-config-f.pkl', help='StyleGAN2 network pickle filename')
    parser.add_argument('--vgg16-pkl', default='.\models\\vgg16_zhang_perceptual.pkl', help='VGG16 network pickle filename')
    parser.add_argument('--num-steps', type=int, default=1000, help='Number of optimization steps')
    parser.add_argument('--initial-learning-rate', type=float, default=0.1, help='Initial learning rate')
    parser.add_argument('--initial-noise-factor', type=float, default=0.05, help='Initial noise factor')
    # was type=bool: '--verbose False' silently evaluated to True
    parser.add_argument('--verbose', type=str2bool, default=False, help='Verbose output')
    # was type=bool: same pitfall as --verbose
    parser.add_argument('--video', type=str2bool, default=False, help='Render video of the optimization process')
    parser.add_argument('--video-mode', type=int, default=1, help='Video mode: 1 for optimization only, 2 for source + optimization')
    parser.add_argument('--video-size', type=int, default=1024, help='Video size (height in px)')
    parser.add_argument('--video-fps', type=int, default=25, help='Video framerate')
    parser.add_argument('--video-codec', default='libx264', help='Video codec')
    parser.add_argument('--video-bitrate', default='5M', help='Video bitrate')
    args = parser.parse_args()

    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    # Build the projector.
    proj = projector.Projector(
        vgg16_pkl             = args.vgg16_pkl,
        num_steps             = args.num_steps,
        initial_learning_rate = args.initial_learning_rate,
        initial_noise_factor  = args.initial_noise_factor,
        verbose               = args.verbose
    )
    # Attach the StyleGAN2 generator to the projector.
    proj.set_network(Gs)

    src_files = sorted([os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir) if f[0] not in '._'])
    # Iterate over every image in the source directory.
    for src_file in src_files:
        project_image(proj, src_file, args.dst_dir, args.tmp_dir, video=args.video)
        # If requested, render the optimization snapshots into a video.
        if args.video:
            render_video(
                src_file, args.dst_dir, args.tmp_dir, args.num_steps, args.video_mode,
                args.video_size, args.video_fps, args.video_codec, args.video_bitrate
            )
        # Remove per-image temporary files (tfrecords / video frames).
        shutil.rmtree(args.tmp_dir)
Beispiel #18
0
def encode_image(args):
    """Optionally align faces, then project every image in args.src_dir
    into the StyleGAN2 latent space."""
    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(vgg16_pkl=args.vgg16_pkl,
                               num_steps=args.num_steps,
                               initial_learning_rate=args.initial_learning_rate,
                               initial_noise_factor=args.initial_noise_factor,
                               verbose=args.verbose)
    proj.set_network(Gs)
    if args.align:
        align(args)
    # Skip hidden/underscore-prefixed entries; process in sorted order.
    entries = [f for f in os.listdir(args.src_dir) if f[0] not in '._']
    for fname in sorted(entries):
        project_image(proj, os.path.join(args.src_dir, fname),
                      args.dst_dir, args.tmp_dir)
Beispiel #19
0
def my_project_real_images(num_images, data_dir):
    """Project `num_images` real images (FFHQ config-f, 5 snapshots each)
    and return their final dlatents concatenated along axis 0.

    Returns None when num_images == 0.

    Fixes:
      * `vec_syn` is now initialized, so num_images == 0 no longer raises
        NameError at the return statement.
      * removed the no-op `num_snapshots = num_snapshots` self-assignment.
    """
    network_pkl = 'gdrive:networks/stylegan2-ffhq-config-f.pkl'
    dataset_name = 'dataset'
    num_snapshots = 5

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0, repeat=False, shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    os.makedirs(data_dir+'/real_images', exist_ok=True)
    vec_syn = None  # accumulated dlatents, one row block per image
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        images, _labels = dataset_obj.get_minibatch_np(1)
        # Rescale uint8 [0, 255] pixels into the generator's [-1, 1] range.
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])

        targets = images
        png_prefix = data_dir+'/real_images/image'+str(image_idx)

        # Optimization steps at which intermediate snapshots are written.
        snapshot_steps = set(proj.num_steps - np.linspace(0, proj.num_steps, num_snapshots, endpoint=False, dtype=int))
        misc.save_image_grid(targets, png_prefix + 'target.png', drange=[-1,1])
        proj.start(targets)
        while proj.get_cur_step() < proj.num_steps:
            print('\r%d / %d ... ' % (proj.get_cur_step(), proj.num_steps), end='', flush=True)
            proj.step()
            if proj.get_cur_step() in snapshot_steps:
                misc.save_image_grid(proj.get_images(), png_prefix + 'step%04d.png' % proj.get_cur_step(), drange=[-1,1])

            if proj.get_cur_step() == proj.num_steps:
                vec = proj.get_dlatents()
                vec_syn = vec if vec_syn is None else np.concatenate([vec_syn, vec])
                print(vec_syn.shape)
        print('\r%-30s\r' % '', end='', flush=True)

    return vec_syn
Beispiel #20
0
  def project(self):
    """Fetch the networks from Google Cloud links and project every image
    found in self.src_dir, cleaning up the temp dir after each one."""
    network_pkl = make_gcloud_link(self.network_pkl_gcloud_id)
    vgg16_pkl = make_gcloud_link(self.vgg16_pkl_gcloud_id)
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(vgg16_pkl=vgg16_pkl,
                               num_steps=self.num_steps,
                               initial_learning_rate=self.initial_learning_rate,
                               initial_noise_factor=self.initial_noise_factor,
                               verbose=self.verbose,
                               dlatent_avg_fname=self.dlatent_avg_fname)
    proj.set_network(Gs)

    # Skip hidden/underscore-prefixed entries; process in sorted order.
    candidates = [f for f in os.listdir(self.src_dir) if f[0] not in '._']
    for fname in sorted(candidates):
        project_image(proj, os.path.join(self.src_dir, fname),
                      self.dst_dir, self.tmp_dir, video=False)
        shutil.rmtree(self.tmp_dir)
Beispiel #21
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        start_index, num_snapshots, save_vector):
    """Project `num_images` real dataset images, starting at record
    `start_index`, into the latent space of a pre-trained generator.

    Args:
        network_pkl: Path/URL of the pickled networks (G, D, Gs).
        dataset_name: TFRecord directory name inside `data_dir`.
        data_dir: Root directory containing the datasets.
        num_images: Number of images to project.
        start_index: Index of the first dataset record to project.
        num_snapshots: Number of intermediate snapshots saved per image.
        save_vector: If True, also save the projected dlatents as .npy.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    print('Num images: %d, Starting Index: %d' % (num_images, start_index))
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       verbose=True,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    img_filenames = None
    if dataset_obj._np_filenames is not None:
        # The loop below indexes filenames up to start_index + num_images - 1,
        # so the bound must include start_index (checking only num_images
        # could let img_filenames[image_idx] raise IndexError).
        assert start_index + num_images <= dataset_obj.filenames_size
        img_filenames = dataset_obj._np_filenames

    # The dataset iterator always yields records from 0; advance it so the
    # images fetched below actually correspond to start_index onwards
    # (otherwise images and filenames are misaligned for start_index > 0).
    for _ in range(start_index):
        dataset_obj.get_minibatch_np(1)

    for image_idx in range(start_index, start_index + num_images):
        filename = img_filenames[
            image_idx] if img_filenames is not None else 'unknown'
        print('Projecting image %d/%d... (index: %d, filename: %s)' %
              (image_idx - start_index, num_images, image_idx, filename))

        images, labels = dataset_obj.get_minibatch_np(1)
        # Convert uint8 pixel range [0, 255] to the projector's [-1, 1].
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])

        project_image(proj,
                      targets=images,
                      labels=labels,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' %
                                                          image_idx),
                      num_snapshots=num_snapshots,
                      save_npy=save_vector,
                      npy_file_prefix=dnnlib.make_run_dir_path(filename))
        print(
            '✅ Finished projecting image %d/%d... (index: %d, filename: %s)' %
            (image_idx - start_index + 1, num_images, image_idx, filename))
def project_real_images(Gs,
                        data_dir,
                        dataset_name,
                        snapshot_name,
                        seq_no,
                        num_snapshots=5):
    """Project one real image from a TFRecord dataset into the latent
    space of `Gs` and return the resulting dlatent vector.

    Args:
        Gs: Pre-trained generator network.
        data_dir: Root directory containing the datasets.
        dataset_name: TFRecord directory name inside `data_dir`.
        snapshot_name: Subdirectory used for the snapshot PNG prefix.
        seq_no: Sequence number embedded in the output filenames.
        num_snapshots: Number of intermediate snapshots to save.

    Returns:
        The first (and only) dlatent found by the projector.
    """
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s/%s"...' % (data_dir, dataset_name))
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    print('Projecting image ...')
    targets, _labels = dataset_obj.get_minibatch_np(1)
    # Map uint8 pixel range [0, 255] to the projector's [-1, 1].
    targets = misc.adjust_dynamic_range(targets, [0, 255], [-1, 1])
    prefix = dnnlib.make_run_dir_path('%s/image%04d-' %
                                      (snapshot_name, seq_no))
    project_image(proj,
                  targets=targets,
                  png_prefix=prefix,
                  num_snapshots=num_snapshots)

    # Only one image was projected, so return its dlatent directly.
    dlatents = proj.get_dlatents()
    return dlatents[0]
Beispiel #23
0
def gen_disc_process(weight,
                     numsteps,
                     threshold_set,
                     threshold_dataset_dir,
                     test_set,
                     test_dataset_dir,
                     queue_proj=None):
    """Run `gen_disc_test` on a threshold set and a test set using the
    networks stored in `weight`.

    Args:
        weight: Path/URL of the pickled networks.
        numsteps: Number of projector optimization steps.
        threshold_set, threshold_dataset_dir: Data used to pick a threshold.
        test_set, test_dataset_dir: Data used for evaluation.
        queue_proj: Optional queue; when given, the result tuple is also
            put onto it (useful with multiprocessing).

    Returns:
        Tuple of (threshold_results, test_results).
    """
    print('Loading networks from "%s"...' % weight)
    _, D, Gs = pretrained_networks.load_networks(weight)

    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = numsteps

    results = (
        gen_disc_test(proj, D, threshold_set, threshold_dataset_dir),
        gen_disc_test(proj, D, test_set, test_dataset_dir),
    )

    if queue_proj is not None:
        queue_proj.put(results)
    return results
Beispiel #24
0
def test(argv):
    """Interactive smoke test for the Projector driver.

    Connects to the projector at the IP address given in ``argv[1]`` and
    reads commands from stdin: 'on' powers the projector on, 'off' powers
    it off, 'exit' quits. The projector is always stopped on exit.
    """
    ip = argv[1]
    p = projector.Projector(name='test', address=ip)
    p.on('state_changed', on_state_changed)
    p.start()
    try:
        while True:
            # Python 3: input() replaces Python 2's raw_input(); the
            # original `print e` statements were Python 2 syntax errors here.
            cmd = input('Command>')
            if cmd == 'on':
                try:
                    p.power_on()
                except Exception as e:
                    print(e)
            elif cmd == 'off':
                try:
                    p.power_off()
                except Exception as e:
                    print(e)
            elif cmd == 'exit':
                break
    finally:
        # Stop the driver even if the loop exits via an exception
        # (e.g. KeyboardInterrupt), not only via the 'exit' command.
        p.stop()
Beispiel #25
0
def run_network(args, start_bin, end_bin):
    """Project the source images whose sorted position falls within
    [start_bin, end_bin], optionally rendering a projection video.

    Args:
        args: Namespace carrying the network/projector settings, the
            src/dst/tmp directories, logging and video options.
        start_bin: First index (inclusive) in the sorted file list.
        end_bin: Last index (inclusive) in the sorted file list.
    """
    if args.logfile != '':
        logging.basicConfig(level=logging.INFO, filename=args.logfile)
        logging.info('start logging')

    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl=args.vgg16_pkl,
        num_steps=args.num_steps,
        initial_learning_rate=args.initial_learning_rate,
        initial_noise_factor=args.initial_noise_factor,
        verbose=args.verbose)
    proj.set_network(Gs)

    # Skip hidden/temp files (names starting with '.' or '_').
    src_files = sorted([
        os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir)
        if f[0] not in '._'
    ])
    for cnt, src_file in enumerate(src_files):
        # Honour the bin range BEFORE logging, so files outside the range
        # are not misleadingly reported as "Processing" (the original
        # logged first and skipped afterwards).
        if cnt < start_bin or cnt > end_bin:
            continue
        logging.info('Processing ' + src_file)
        if not os.path.isfile(src_file):
            logging.info('No file ' + src_file)
            continue
        project_image(proj,
                      src_file,
                      args.dst_dir,
                      args.tmp_dir,
                      video=args.video)
        if args.video:
            render_video(src_file, args.dst_dir, args.tmp_dir, args.num_steps,
                         args.video_mode, args.video_size, args.video_fps,
                         args.video_codec, args.video_bitrate)
        # Clear intermediate results between images.
        shutil.rmtree(args.tmp_dir)
def main():
    """Project every image in `a.in_dir` into the latent space of the
    generator loaded from `a.model`, writing per-image results under
    `a.out_dir`. Relies on the module-level argument namespace `a`.
    """
    print('Loading networks from "%s"...' % a.model)
    sess = tflib.init_tf()
    with open(a.model, 'rb') as file:
        network = pickle.load(file, encoding='latin1')
        # Pickles may hold either a (G, D, Gs) tuple or Gs alone.
        # Catch only the unpacking failures instead of the original bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        try:
            _, _, Gs = network
        except (TypeError, ValueError):
            Gs = network
    resolution = tuple(Gs.output_shape[2:])
    proj = projector.Projector(a.steps)
    proj.set_network(Gs)

    img_files = img_list(a.in_dir)
    num_images = len(img_files)
    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx + 1, num_images))
        images = img_read(img_files[image_idx])
        # HWC -> NCHW with a leading batch axis, then [0,255] -> [-1,1].
        images = np.expand_dims(np.transpose(images, [2, 0, 1]), 0)
        images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        work_dir = osp.join(a.out_dir, basename(img_files[image_idx]))
        os.makedirs(work_dir, exist_ok=True)
        project_image(proj, images, work_dir, resolution, a.num_snapshots)
Beispiel #27
0
    def project(self, R, sDist=50, sHeight=600, plotting=False):
        """Project the member's outline edge via the hardware projector.

        Among the two longest edges of the model's corner hull, selects
        the one whose midpoint lies closest to y == 0, lifts it into 3D,
        places a virtual projector `sDist` along the edge normal and
        `sHeight` above it, and sends the resulting segment projection to
        the device while positioning the robot `R`.

        Args:
            R: Robot interface; receives a tool file and a cartesian pose.
            sDist: Stand-off distance along the edge normal
                (units as used by the surrounding code -- TODO confirm).
            sHeight: Height offset added in z.
            plotting: If True, show a matplotlib debug plot; otherwise
                wait for operator confirmation before closing the door.
        """
        p = projector.Projector()
        #points = self.project2D()
        hull = self.modelCorners()
        # Of the two longest hull edges, pick the one whose midpoint has
        # the smallest |y|.
        dmin = 1e9
        ind = -1
        for i in numpy.argsort(self.edgeLength(hull))[-2:]:
            dist = numpy.abs(numpy.mean([hull[i], hull[i + 1]], axis=0)[1])

            if dist < dmin:
                dmin = dist
                ind = i
        projPt2D = hull[ind:ind + 2, :]  # the two endpoints of the chosen edge
        projPt3D = projector.projectPlane(projPt2D)
        center = numpy.mean(projPt3D, axis=0)

        # In-plane unit normal of the edge (cross of z-axis with the edge
        # direction), used to offset the projector position.
        n = projector.unitv(
            numpy.cross([0, 0, 1], numpy.diff(projPt3D, axis=0)))
        rPosition = center + 1 * n * sDist + [0, 0, sHeight]

        # Virtual projector pose; presumably looks from rPosition towards
        # the edge centre -- verify against projector.virtual.
        vt = projector.virtual(center, rPosition)

        seg = projector.Segments(projPt3D)

        gltm = projector.segProjections(vt, seg)

        # Locate the 'trunk' root in the cwd path to find the tool file.
        trunk = os.getcwd()[0:(6 + os.getcwd().rfind('trunk'))]
        R.setToolFile(trunk + 'data/tool.object.projector')
        R.setCartesian(vt)

        # Hardware side effects: upload the projection and open the door.
        p.send(gltm)
        p.doorOpen(1)

        if plotting:
            plt.plot(hull[:, 0], hull[:, 1], '.', hull[ind:ind + 2, 0],
                     hull[ind:ind + 2, 1], rPosition[0], rPosition[1], 'o')
            plt.show()
        else:
            # NOTE(review): raw_input is Python 2 only -- under Python 3
            # this raises NameError; confirm the intended interpreter.
            raw_input('projecting member ')
        p.doorOpen(0)
Beispiel #28
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Project `num_images` dataset images, then print the pairwise dot
    product and cosine distance between the mean dlatents of every pair
    of projections."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    # Start from a clean latents file; ignore the error if none exists.
    try:
        os.remove('latents.txt')
    except OSError:
        pass

    latents = []
    for idx in range(num_images):
        print('Projecting image %d/%d ...' % (idx + 1, num_images))
        targets, _labels = dataset_obj.get_minibatch_np(1)
        targets = misc.adjust_dynamic_range(targets, [0, 255], [-1, 1])
        prefix = dnnlib.make_run_dir_path('image%04d-' % idx)
        dlatents = project_image(proj,
                                 targets=targets,
                                 png_prefix=prefix,
                                 num_snapshots=num_snapshots)
        # Average over the first axis of the returned dlatents.
        latents.append(np.mean(dlatents, axis=0))

    # Pairwise comparison over all projected latents.
    count = len(latents)
    for j in range(count - 1):
        for i in range(j + 1, count):
            print(
                f"Dot product between {j} and {i}: {np.dot(latents[j],latents[i])}"
            )
            print(
                f"Cosine product between {j} and {i}: {spatial.distance.cosine(latents[j],latents[i])}"
            )
Beispiel #29
0
def project_images(Gs,
                   images_dir,
                   tfrecord_dir,
                   data_dir,
                   num_snapshots,
                   pure_projector=False):
    """Convert the images in `images_dir` to TFRecords and project each
    one into the latent space of `Gs`, writing one output directory per
    image under `data_dir`.

    Note: `pure_projector` is currently unused but kept for interface
    compatibility with callers.
    """
    print('Setting up projector')
    proj = projector.Projector()
    proj.set_network(Gs)

    # Build the TFRecord dataset from the raw image files.
    nb_images = dataset_tool.create_from_images(str(tfrecord_dir),
                                                str(images_dir), True)

    # Reload the freshly written records.
    dataset_obj = training.dataset.load_dataset(tfrecord_dir=tfrecord_dir,
                                                max_label_size=0,
                                                verbose=True,
                                                repeat=False,
                                                shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    print('=======================')
    for image_idx in tqdm(range(nb_images)):
        print(f'Projecting image {image_idx + 1}/{nb_images}')

        targets, _labels = dataset_obj.get_minibatch_np(1)
        targets = training.misc.adjust_dynamic_range(targets, [0, 255],
                                                     [-1, 1])

        # One output directory per image (data_dir is a pathlib.Path here).
        run_path = data_dir / f'out_{image_idx}'
        run_path.mkdir()
        prefix = dnnlib.make_run_dir_path(str(run_path / 'image_'))
        run_projector.project_image(proj,
                                    targets=targets,
                                    png_prefix=prefix,
                                    num_snapshots=num_snapshots)
Beispiel #30
0
def project_real_images(dataset_name, data_dir, num_images, num_snapshots, model_pkl, steps=1000):
    """Project `num_images` images from a TFRecord dataset using the
    networks pickled in `model_pkl`, running `steps` projector steps."""
    fh = open(model_pkl, 'rb')
    tflib.init_tf()
    with fh:
        G, D, Gs = pickle.load(fh, encoding='latin1')

    proj = projector.Projector()
    proj.set_network(Gs)
    proj.num_steps = steps

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = training.dataset.load_dataset(
        data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0,
        verbose=True, repeat=False, shuffle_mb=0)
    print(dataset_obj.shape)
    print(Gs.output_shape)
    assert dataset_obj.shape == Gs.output_shape[1:]

    for image_idx in range(num_images):
        print('Projecting image %d/%d ...' % (image_idx, num_images))
        targets, _labels = dataset_obj.get_minibatch_np(1)
        targets = training.misc.adjust_dynamic_range(targets, [0, 255], [-1, 1])
        # NOTE(review): `img_path` is a free variable -- it must be defined
        # at module level or this raises NameError; verify against callers.
        prefix = dnnlib.make_run_dir_path(f'{img_path}image%04d-' % image_idx)
        run_projector.project_image(proj, targets=targets, png_prefix=prefix,
                                    num_snapshots=num_snapshots)