示例#1
0
def _load_latent_metadata(input_dir):
    """Collect (z, truncation_psi) pairs from the metadata of every png/jpg in *input_dir*."""
    files = glob.glob(input_dir + "/*.png") + glob.glob(input_dir + "/*.jpg")
    pairs = []
    for f in files:
        metadata = read_metadata(f)
        pairs.append((metadata['z'], metadata['truncation_psi']))
    return pairs


def interpolate_folder(network_pkl, input_dir, input_dir2, count):
    """Interpolate between latents recovered from image metadata and save the frames.

    Args:
        network_pkl: pickled StyleGAN2 network to synthesize with.
        input_dir: folder of images whose metadata holds 'z' and 'truncation_psi'.
        input_dir2: optional second folder; if given, pairs are drawn as the
            cross product of the two folders, otherwise as combinations within
            the first folder.
        count: number of intermediate frames per pair.

    Fixes vs. original: the network-loading/Gs_kwargs block was duplicated
    verbatim (networks were loaded twice), and the metadata-reading loop was
    duplicated for each folder; both are now factored out.
    """
    L = _load_latent_metadata(input_dir)
    L2 = _load_latent_metadata(input_dir2) if input_dir2 else []

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False

    random.shuffle(L)
    if L2:
        random.shuffle(L2)
        it = itertools.product(L, L2)
    else:
        it = itertools.combinations(L, 2)

    for x, y in it:
        # Blend the two stored truncation values at a random ratio.
        psi0, psi1 = x[1], y[1]
        truncation_psi = psi0 + (psi1 - psi0) * np.random.random()
        Gs_kwargs.truncation_psi = truncation_psi
        noise_seed = np.random.randint(0, 1000000)
        rnd_noise = np.random.RandomState(noise_seed)
        # Renamed from `vars` (shadowed the builtin).
        noise_values = {var: rnd_noise.randn(*var.shape.as_list()) for var in noise_vars}
        tflib.set_vars(noise_values)  # [height, width]
        for i in range(count):
            r = (i + 1) / (count + 1)  # evenly spaced interior points, endpoints excluded
            z = (1 - r) * x[0] + r * y[0]
            images = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
            im = PIL.Image.fromarray(images[0], 'RGB')
            fn = 'if_{:03d}.jpg'.format(int(r * 1000))
            metadata = {'z': z, 'truncation_psi': truncation_psi, 'noise_seed': noise_seed}
            save_with_metadata(im, fn, metadata, True)
示例#2
0
def generate_blended_image(image):
    """Load the blended + FFHQ networks and (intend to) return a toonified image.

    NOTE(review): `img` is never assigned anywhere in this body, so the final
    `return img` raises NameError at runtime.  The commented-out block below
    shows the presumably intended synthesis loop; it must be restored (and its
    result bound to `img`) before this function can work.
    """
    tflib.init_tf()
    # download_extract()

    # SAVE FILE TO RAW

    # ALIGN IT

    # NOTE(review): message says "networks" but interpolates the *image* argument.
    print('Loading networks from "%s"...' % image)
    # _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    blended_url = "AlfredENeuman24_ADA-VersatileFaces36_ADA_v2-blended-64.pkl"
    ffhq_url = "stylegan2-ffhq-config-f.pkl"

    _, _, Gs_blended = pretrained_networks.load_networks(blended_url)
    _, _, Gs = pretrained_networks.load_networks(ffhq_url)

    # latent_dir = Path("generated")
    # latents = latent_dir.glob("*.npy")
    # for latent_file in latents:
    #     latent = np.load(latent_file)
    #     latent = np.expand_dims(latent,axis=0)
    #     synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
    #     images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
    #     Image.fromarray(images.transpose((0,2,3,1))[0], 'RGB').save(latent_file.parent / (f"{latent_file.stem}-toon.jpg"))
    # imgcat(img)

    return img
示例#3
0
def main():
    """Run the blended ("toon") StyleGAN2 generator over every projected latent.

    Expects sys.argv[1] to name a directory of *.npy dlatent files; writes a
    "<stem>-toon.jpg" next to each input file.
    """
    # Use a mirrored copy of the blended model to save Doron's download bandwidth;
    # the original lives at https://mega.nz/folder/OtllzJwa#C947mCCdEfMCRTWnDcs4qw
    blended_url = "https://drive.google.com/uc?id=1H73TfV5gQ9ot7slSed_l-lim9X7pMRiU"
    ffhq_url = "http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl"

    # Make the CUDA toolchain visible to TensorFlow.
    cuda_lib = '/usr/local/cuda/lib64'  # ':/usr/local/nccl2/lib:/usr/local/cuda/extras/CUPTI/lib64'
    os.environ['PATH'] = '/usr/local/cuda/bin:/opt/conda/envs/stylegan2/bin:/opt/conda/condabin' + ':' + os.getenv('PATH')
    os.environ['LD_LIBRARY_PATH'] = cuda_lib
    os.environ['LD_RUN_PATH'] = cuda_lib

    _, _, Gs_blended = pretrained_networks.load_networks(blended_url)
    _, _, Gs = pretrained_networks.load_networks(ffhq_url)

    for latent_file in Path(sys.argv[1]).glob("*.npy"):
        latent = np.expand_dims(np.load(latent_file), axis=0)
        synthesis_kwargs = dict(
            output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False),
            minibatch_size=8)
        images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
        out_path = latent_file.parent / (f"{latent_file.stem}-toon.jpg")
        Image.fromarray(images.transpose((0, 2, 3, 1))[0], 'RGB').save(out_path)
示例#4
0
def main():
    """Style-mix each stored dlatent in generated_images/ with a random "passport" face.

    For every *.npy dlatent, layers 0-6 are replaced with the corresponding
    layers of a freshly sampled face, and the result is saved as a PNG.
    """
    _G, _D, Gs = pretrained_networks.load_networks("results/00005-stylegan2-500_128_passport-1gpu-config-f/network-snapshot-000361.pkl")
    w_avg = Gs.get_var('dlatent_avg')  # [component]

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = 4

    for img_name in [f for f in os.listdir('generated_images') if f[0] not in '._']:
        if not img_name.endswith('.npy'):
            continue
        path = os.path.join('generated_images', img_name)
        input_img = np.load(path)

        seed = random.randint(1, 999)
        z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:])])
        w = Gs.components.mapping.run(z, None)
        # Truncation trick: pull w toward the average dlatent.
        # BUG FIX: the original computed w_avg + (w + w_avg) * 1.0, which adds
        # the average twice; the standard formula uses the *difference*.
        w = w_avg + (w - w_avg) * 1.0
        passport = Gs.components.synthesis.run(w, **Gs_syn_kwargs)  # [minibatch, height, width, channel]
        w = w[0]
        col_styles = _parse_num_range('0-6')  # style layers to transplant
        input_img[col_styles] = w[col_styles]
        image = Gs.components.synthesis.run(input_img[np.newaxis], **Gs_syn_kwargs)[0]
        PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('output_images/%s.png' % (os.path.splitext(img_name)[0])))
示例#5
0
def regenerate_folder(network_pkl, input_dir):
    """Re-synthesize every image in *input_dir* from the latent metadata embedded in it.

    Each image's metadata supplies 'z', 'truncation_psi', and either explicit
    'noise_vars' values or a 'noise_seed' to regenerate them; results are
    saved as '<name>-b.png' alongside the original metadata.
    """
    files = glob.glob(input_dir + "/*.png") + glob.glob(input_dir + "/*.jpg")
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False

    for f in files:
        metadata = read_metadata(f)
        print(metadata.keys())
        z = metadata['z']
        truncation_psi = metadata['truncation_psi']
        if 'noise_vars' in metadata:
            # BUG FIX: the original rebound `noise_vars` (the list of TF
            # variables) to this metadata dict, which broke the `else` branch
            # for every subsequent file in the loop.
            stored_noise = metadata['noise_vars']
            noise_values = {var: stored_noise[name] for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')}
        else:
            rnd = np.random.RandomState(metadata['noise_seed'])
            noise_values = {var: rnd.randn(*var.shape.as_list()) for var in noise_vars}
        Gs_kwargs.truncation_psi = truncation_psi
        tflib.set_vars(noise_values)  # [height, width]
        images = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
        im = PIL.Image.fromarray(images[0], 'RGB')
        fn = os.path.splitext(os.path.basename(f))[0] + '-b.png'
        save_with_metadata(im, fn, metadata, False)
示例#6
0
def project_generated_images(network_pkl, seeds, num_snapshots,
                             truncation_psi):
    """Generate one image per seed, then run the projector against it."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)
    noise_vars = [v for n, v in Gs.components.synthesis.vars.items()
                  if n.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict(randomize_noise=False,
                                truncation_psi=truncation_psi)

    total = len(seeds)
    for idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, idx, total))
        # Seed drives both the latent and the per-layer noise values.
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        tflib.set_vars({v: rnd.randn(*v.shape.as_list()) for v in noise_vars})
        targets = Gs.run(z, None, **Gs_kwargs)
        project_image(proj,
                      targets=targets,
                      labels=None,
                      png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
                      num_snapshots=num_snapshots,
                      save_npy=False,
                      npy_file_prefix='NONAME')
示例#7
0
def generate_images_from_w_vectors(network_pkl,
                                   w_vectors_file,
                                   seeds_file=None):
    """Synthesize images directly from saved W vectors.

    Output files are named by seed when *seeds_file* is given, otherwise by
    sequential index.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    all_w = np.load(w_vectors_file)
    seeds = np.load(seeds_file) if seeds_file is not None else None

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                          nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False

    print('Generating images...')
    all_images = Gs.components.synthesis.run(
        all_w, **Gs_syn_kwargs)  # [minibatch, height, width, channel]

    if seeds is None:
        names = ['%04d.png' % i for i in range(len(all_images))]
    else:
        names = ['seed%04d.png' % s for s in seeds]
    for name, img in zip(names, all_images):
        PIL.Image.fromarray(img, 'RGB').save(dnnlib.make_run_dir_path(name))
示例#8
0
def apply_vector_on_backprop(network_pkl, dlatents_files_pattern,
                             direction_path, transformation):
    """Apply a semantic direction vector to projected dlatents at several alphas.

    Args:
        network_pkl: pickled StyleGAN2 network used for synthesis.
        dlatents_files_pattern: prefix globbed as '<pattern>*step.txt'; each
            file holds one whitespace-separated dlatent per projection step.
        direction_path: for 'gender', a file of raw floats on one line; for
            'age', dict-like JSON of age-bucket vectors (single-quoted).
        transformation: 'gender' or 'age'.

    Raises:
        ValueError: for an unknown transformation value.
        Exception: when a direction or person vector has an unexpected length.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    # Projection steps each dlatent file is expected to contain, in order.
    dlatent_steps = [5, 10, 50, 100, 200, 400, 600, 800, 1000]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False

    if transformation == 'gender':
        with open(direction_path) as f:
            delta = np.array([float(i) for i in f.readlines()[0].split()])
            # Broadcast a single 512-vector across all 18 style layers.
            if len(delta) == 512:
                delta = np.tile(delta, [18, 1])
            else:
                raise Exception('Wrong direction vector')
    elif transformation == 'age':
        with open(direction_path) as f:
            # The file uses single quotes; rewrite to valid JSON before parsing.
            age = json.loads(f.read().replace("'", '"'))
        delta = np.array(age['18-24']) - np.array(age['45-54'])  # 55-130
        if len(delta) == 512:
            delta = np.tile(delta, [18, 1])
        else:
            raise Exception('Wrong direction vector')
    else:
        raise ValueError('Wrong transformation value')

    dlatents_files = glob(dlatents_files_pattern + '*step.txt')
    for file in tqdm(dlatents_files):
        # NOTE(review): '/'-based splitting assumes POSIX paths — confirm.
        person_name = file.split('/')[-1].split('image')[-1].split('-')[0]
        with open(file) as f:
            for step, line in zip(dlatent_steps, f.readlines()):
                # Only render selected projection steps.
                if step in [200, 1000]:  # [10, 50, 100, 200, 1000]:
                    dlatent = np.array([float(i) for i in line.split()])
                    if len(dlatent) == 512:
                        dlatent = np.tile(dlatent, [18, 1])
                    elif len(dlatent) == 512 * 18:
                        dlatent = np.reshape(dlatent, [18, 512])
                    else:
                        raise Exception('Wrong person vector')

                    dlatents_person = []
                    fnames = []

                    # Alphas -1.0..1.0 in 0.25 steps, skipping -0.25 and +0.25.
                    alphas = [
                        -1.0 + 0.25 * i for i in range(9) if i not in [3, 5]
                    ]
                    for alpha in alphas:
                        dlatents_person.append(dlatent + alpha * delta)
                        fnames.append('translation_%s_%s_%s.png' %
                                      (person_name, step, alpha))

                    images = Gs.components.synthesis.run(
                        np.array(dlatents_person), **Gs_kwargs)

                    for fname, image in zip(fnames, images):
                        PIL.Image.fromarray(image, 'RGB').save(
                            dnnlib.make_run_dir_path(fname))
def main():
    """Average stored dlatents and apply add/subtract vector arithmetic to edit images.

    Loads a trained network, averages two sets of saved dlatents (clothing and
    person), and runs PerformVectorOperationAddSub over the images to edit.
    """
    sc = dnnlib.SubmitConfig()
    sc.num_gpus = 1
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    # Raw strings: the originals relied on '\P', '\s', ... not being escape
    # sequences (a SyntaxWarning on modern Python, slated to become an error)
    # and had to double only '\\r', '\\0', '\\2'.  Values are unchanged.
    sc.run_dir_root = r"D:\PythonProjectsDDrive\stylegan2-master"
    sc.run_desc = 'generate-images'
    network_pkl = r"D:\PythonProjectsDDrive\stylegan2-master\TrainedGANs\256-model-network-snapshot-018708.pkl"

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)

    avrgGreyShirtDL = LoadVectorAverage(
        r"D:\PythonProjectsDDrive\MasterThesisClothesFittingwGANs\LatentSpaceImages\MonoClothingIMages"
    )
    avrgPersonDL = LoadVectorAverage(
        r"D:\PythonProjectsDDrive\stylegan2-master\results\00063-generate-images"
    )
    PerformVectorOperationAddSub(
        r"D:\PythonProjectsDDrive\MasterThesisClothesFittingwGANs\LatentSpaceImages\ToEditImagesDL",
        avrgPersonDL, avrgGreyShirtDL, Gs)
示例#10
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_steps, num_snapshots, save_every_dlatent,
                        save_final_dlatent):
    """Project *num_images* real images from a TFRecord dataset into dlatent space."""
    assert num_snapshots <= num_steps, "Can't have more snapshots than number of steps taken!"
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(num_steps=num_steps)
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    for idx in range(num_images):
        print('Projecting image %d/%d ...' % (idx, num_images))
        try:
            batch, _labels = dataset_obj.get_minibatch_np(1)
            # Projector expects images in [-1, 1].
            batch = misc.adjust_dynamic_range(batch, [0, 255], [-1, 1])
            project_image(proj,
                          targets=batch,
                          png_prefix=dnnlib.make_run_dir_path('image%04d-' % idx),
                          num_snapshots=num_snapshots,
                          save_every_dlatent=save_every_dlatent,
                          save_final_dlatent=save_final_dlatent)
        except tf.errors.OutOfRangeError:
            print(
                f'Error! There are only {idx} images in {data_dir}{dataset_name}!'
            )
            sys.exit(1)
示例#11
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Project images from a fixed folder; the TFRecord dataset is loaded for
    the shape check only and *num_images* is unused (kept for interface parity)."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]

    # Projection targets actually come from this hard-coded folder.
    imgs = read_images('/gdata2/fengrl/imgs-for-embed')

    total = len(imgs)
    for idx in range(total):
        print('Projecting image %d/%d ...' % (idx, total))
        targets = np.expand_dims(imgs[idx], 0)
        project_image(proj,
                      targets=targets,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' % idx),
                      num_snapshots=num_snapshots)
示例#12
0
def generate_grid_of_variants(submit_config, network_pkl, truncation_psi,
                              latents_file):
    """Render a 32x1 image grid in which every cell reuses the latents from *latents_file*."""
    print('starting process of generating grid of variants of ' + latents_file)

    tflib.init_tf({'rnd.np_random_seed': 1000})

    with open(latents_file, 'r') as fh:
        original_latents = np.array(json.load(fh))
    print('loaded original latents from ' + latents_file)

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)

    grid_size = (32, 1)
    grid_labels = []

    # Fill every grid cell with the same latent vector.
    n_cells = grid_size[0] * grid_size[1]
    grid_latents = np.ndarray(shape=(n_cells, 512))
    for cell in range(n_cells):
        grid_latents[cell] = original_latents

    grid_fakes = Gs.run(grid_latents,
                        grid_labels,
                        is_validation=True,
                        minibatch_size=4)
    misc.save_image_grid(grid_fakes,
                         dnnlib.make_run_dir_path('latentmod-1.png'),
                         drange=[-1, 1],
                         grid_size=grid_size)
示例#13
0
def save_weights(network_pkl):
    """Load a pickled network and save its trainable variables as a TF checkpoint.

    Also writes the generator's output resolution (height,width) to
    resolution.txt.  Checkpoint files are written under the prefix
    "save_weights" in the current directory.

    Fix vs. original: a stray unterminated `\"\"\"` followed this function in
    the scraped source and broke parsing; it has been removed.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]
    with open("resolution.txt", "w") as f:
        f.write(",".join([str(i) for i in Gs.output_shape[-2:]]))

    # Materialize deterministic values before checkpointing.  `z` is unused
    # downstream but kept: drawing it advances the RNG stream, so removing it
    # would change the noise values written to the checkpoint.
    rnd = np.random.RandomState(0)
    z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
    tflib.set_vars(
        {var: rnd.randn(*var.shape.as_list())
         for var in noise_vars})  # [height, width]

    saver = tf.train.Saver(var_list=tf.trainable_variables())
    saver.save(sess=tf.get_default_session(), save_path="save_weights")
示例#14
0
def generate_video(network_pkl, seed, truncation_psi, num_images):
    """Write *num_images* frames interpolating between two random latents of *seed*."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [v for n, v in Gs.components.synthesis.vars.items()
                  if n.startswith('noise')]
    # noise_vars = noise_vars[2:]
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    frame_dir = 'video/xseed%04d' % seed
    os.makedirs(frame_dir, exist_ok=True)
    rnd = np.random.RandomState(seed)
    latent_shape, label_shape = Gs.input_shapes
    labels = np.zeros([1, *label_shape[1:]])  # currently unused: Gs.run is given None
    z_start = rnd.randn(1, *latent_shape[1:])  # [minibatch, component]
    z_stop = rnd.randn(1, *latent_shape[1:])
    tflib.set_vars({v: rnd.randn(*v.shape.as_list())
                    for v in noise_vars})  # [height, width]

    for frame_idx, z in enumerate(np.linspace(z_start, z_stop, num_images)):
        frame = Gs.run(z, None, **Gs_kwargs)[0]
        PIL.Image.fromarray(frame, 'RGB').save(
            frame_dir + '/image%04d.png' % frame_idx)
示例#15
0
def interpolation_example(network_pkl, src_seeds, tar_seeds, truncation_psi):
    """Save 150 interpolation frames between each (source, target) seed pair."""
    assert len(src_seeds) == len(tar_seeds)

    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [v for n, v in Gs.components.synthesis.vars.items() if n.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    total = len(src_seeds)
    for pair_idx, (seed0, seed1) in enumerate(zip(src_seeds, tar_seeds)):
        print('Generating images for seeds %d and %d (%d/%d) ...' % (seed0, seed1, pair_idx + 1, total))
        rnd0 = np.random.RandomState(seed0)
        rnd1 = np.random.RandomState(seed1)
        z0 = rnd0.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        z1 = rnd1.randn(1, *Gs.input_shape[1:])
        # Noise is drawn from the source seed's stream, so all frames share it.
        tflib.set_vars({v: rnd0.randn(*v.shape.as_list()) for v in noise_vars})  # [height, width]
        for prog, z in enumerate(np.linspace(z0, z1, 150)):
            print(f"{prog + 1}/150\r", end='')
            frames = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
            PIL.Image.fromarray(frames[0], 'RGB').save(dnnlib.make_run_dir_path('int%04d-%04d-%03d.png' % (seed0, seed1, prog)))
示例#16
0
def truncation_traversal(network_pkl, npys, seed=[0], start=-1.0, stop=1.0, increment=0.1):
    """Render one frame per truncation value from *start* to *stop* in *increment* steps.

    Note: *npys* is unused here, and `seed=[0]` is a (harmless, never-mutated)
    mutable default kept for interface compatibility.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [v for n, v in Gs.components.synthesis.vars.items()
                  if n.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(
        func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False

    frame_no = 1
    psi = start
    while psi <= stop:
        Gs_kwargs.truncation_psi = psi
        print('Generating truncation %0.2f' % psi)

        # Same seed every iteration: only truncation varies across frames.
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        tflib.set_vars({v: rnd.randn(*v.shape.as_list())
                        for v in noise_vars})  # [height, width]
        frames = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
        PIL.Image.fromarray(frames[0], 'RGB').save(
            dnnlib.make_run_dir_path('frame%05d.png' % frame_no))

        psi += increment
        frame_no += 1
示例#17
0
def generate_noisevar_imgs(network_pkl, seeds, num_samples, num_variants):
    """Visualize per-sample variation for each seed.

    For each seed: run *num_samples* copies of the same latent, save them as
    an animated GIF, paste the first num_variants+1 onto a canvas, and append
    a scaled std-dev "difference" panel.

    NOTE(review): `tflib.convert_imgs_to_uint8`, `misc.to_pil`, and `save_gif`
    are not helpers in stock stylegan2 — presumably this sample comes from a
    fork that provides them; verify before reuse.
    """
    print("Loading networks from %s..." % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)[:3]
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.truncation_psi = 1
    Gs_kwargs.output_transform = dict(func=tflib.convert_imgs_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.minibatch_size = 4
    _, _, H, W = Gs.output_shape

    for seed_idx, seed in enumerate(seeds):
        print("Generating image for seed %d (%d/%d)..." %
              (seed, seed_idx, len(seeds)))
        # Canvas: num_variants+1 sample images plus one difference panel.
        canvas = PIL.Image.new("RGB", (W * (num_variants + 2), H), "white")

        # Identical latent repeated num_samples times; any per-sample variation
        # presumably comes from noise randomization — confirm against Gs.run.
        z = np.stack([np.random.RandomState(seed).randn(Gs.input_shape[1])] *
                     num_samples)  # [minibatch, component]
        imgs = Gs.run(z, None,
                      **Gs_kwargs)  # [minibatch, height, width, channel]

        npimgs = imgs
        imgs = [misc.to_pil(img) for img in imgs]

        save_gif(imgs, dnnlib.make_run_dir_path("noisevar%04d.gif" % seed))
        for i in range(num_variants + 1):
            canvas.paste(imgs[i], (i * W, 0))

        # Per-pixel std (over samples) of the channel mean, scaled for visibility.
        diff = np.std(np.mean(npimgs, axis=3), axis=0) * 4
        diff = np.clip(diff + 0.5, 0, 255).astype(np.uint8)
        canvas.paste(PIL.Image.fromarray(diff, "L"),
                     (W * (num_variants + 1), 0))

        canvas.save(dnnlib.make_run_dir_path("noisevar%04d.png" % seed))
示例#18
0
def project_generated_images(network_pkl, seeds, num_snapshots, num_steps,
                             truncation_psi, save_target_dlatent,
                             save_every_dlatent, save_final_dlatent):
    """Synthesize an image per seed via W space, then project it back."""
    assert num_snapshots <= num_steps, "Can't have more snapshots than number of steps taken!"
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector(num_steps=num_steps)
    proj.set_network(Gs)
    noise_vars = [v for n, v in Gs.components.synthesis.vars.items()
                  if n.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict(randomize_noise=False,
                                truncation_psi=truncation_psi)

    total = len(seeds)
    for idx, seed in enumerate(seeds):
        print('Projecting seed %d (%d/%d) ...' % (seed, idx, total))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])
        tflib.set_vars({v: rnd.randn(*v.shape.as_list()) for v in noise_vars})
        w = Gs.components.mapping.run(z, None)
        if save_target_dlatent:
            # Keep the ground-truth dlatent so projection error can be measured.
            np.save(dnnlib.make_run_dir_path('seed%04d.npy' % seed), w)
        targets = Gs.components.synthesis.run(w, **Gs_kwargs)
        project_image(proj,
                      targets=targets,
                      png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed),
                      num_snapshots=num_snapshots,
                      save_every_dlatent=save_every_dlatent,
                      save_final_dlatent=save_final_dlatent)
示例#19
0
def project_real_images(submit_config, network_pkl, dataset_name, data_dir,
                        num_images, num_snapshots):
    """Project real dataset images; projector verbosity comes from *submit_config*."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.verbose = submit_config.verbose
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    print('dso shape: ' + str(dataset_obj.shape) + ' vs gs shape: ' +
          str(Gs.output_shape[1:]))
    assert dataset_obj.shape == Gs.output_shape[1:]

    for idx in range(num_images):
        print('Projecting image %d/%d ...' % (idx, num_images))
        batch, _labels = dataset_obj.get_minibatch_np(1)
        # Projector expects images in [-1, 1].
        batch = misc.adjust_dynamic_range(batch, [0, 255], [-1, 1])
        project_image(proj,
                      targets=batch,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' % idx),
                      num_snapshots=num_snapshots)
示例#20
0
文件: stylegan2.py 项目: zahidna/ml4a
def load_model(network_pkl, randomize_noise=False):
    """Load a pickled StyleGAN2 network into module-level globals.

    Sets _G, _D, Gs, and the synthesis kwargs used by later generation calls.
    """
    global _G, _D, Gs, Gs_syn_kwargs
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    Gs_syn_kwargs = dnnlib.EasyDict(
        output_transform=dict(func=tflib.convert_images_to_uint8,
                              nchw_to_nhwc=True),
        randomize_noise=randomize_noise)
示例#21
0
def project_real_images(network_pkl, dataset_name, data_dir, num_images,
                        num_snapshots):
    """Project *num_images* real dataset images, with a colorized shape-mismatch message."""
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    proj = projector.Projector()
    proj.set_network(Gs)

    print('Loading images from "%s"...' % dataset_name)
    dataset_obj = dataset.load_dataset(data_dir=data_dir,
                                       tfrecord_dir=dataset_name,
                                       max_label_size=0,
                                       repeat=False,
                                       shuffle_mb=0)
    # Colorized assert message (Col.RB/Col.AU are ANSI escape helpers).
    assert dataset_obj.shape == Gs.output_shape[
        1:], "%sexpected shape %s, got %s%s" % (
            dnnlib.util.Col.RB, Gs.output_shape[1:], dataset_obj.shape,
            dnnlib.util.Col.AU)

    for idx in range(num_images):
        print('Projecting image %d/%d ...' % (idx, num_images))
        batch, _labels = dataset_obj.get_minibatch_np(1)
        batch = misc.adjust_dynamic_range(batch, [0, 255], [-1, 1])
        project_image(proj,
                      targets=batch,
                      png_prefix=dnnlib.make_run_dir_path('image%04d-' % idx),
                      num_snapshots=num_snapshots)
示例#22
0
def style_mixing_multiple(network_pkl, row_seeds, col_seeds, truncation_psi, minibatch_size=4):
    """Mix two fixed style-layer groups from two column seeds into each row seed.

    Layers [6, 7] are taken from col_seeds[0] and layers [0, 3] from
    col_seeds[1]; one mixed image is saved per row seed.
    """
    col_styles = [[6, 7], [0, 3]]  # layer groups replaced from col_seeds[0]/[1]
    print('col_styles: {}'.format(col_styles))
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    w_avg = Gs.get_var('dlatent_avg')  # [component]

    Gs_syn_kwargs = dnnlib.EasyDict()
    Gs_syn_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_syn_kwargs.randomize_noise = False
    Gs_syn_kwargs.minibatch_size = minibatch_size

    print('Generating W vectors...')
    all_seeds = list(set(row_seeds + col_seeds))
    all_z = np.stack([np.random.RandomState(seed).randn(*Gs.input_shape[1:]) for seed in all_seeds])  # [minibatch, component]
    all_w = Gs.components.mapping.run(all_z, None)  # [minibatch, layer, component]
    all_w = w_avg + (all_w - w_avg) * truncation_psi  # truncation trick
    w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))}  # [layer, component]
    image_dict = {}

    print('Generating style-mixed images...')
    for row_seed in row_seeds:
        w = w_dict[row_seed].copy()
        for col_style, col_seed in zip(col_styles, col_seeds):
            w[col_style] = w_dict[col_seed][col_style]
        image = Gs.components.synthesis.run(w[np.newaxis], **Gs_syn_kwargs)[0]  # [height, width, channel]
        image_dict[(row_seed, col_seeds[0], col_seeds[1])] = image

    print('Saving images...')
    # BUG FIX: the original unpacked each dict key into `col_seeds[0],
    # col_seeds[1]`, mutating the caller-visible list while iterating; unpack
    # into local names instead.
    for (row_seed, col_a, col_b), image in image_dict.items():
        PIL.Image.fromarray(image, 'RGB').save(dnnlib.make_run_dir_path('%d-%d-%d.png' % (row_seed, col_a, col_b)))
示例#23
0
def generate_images(network_pkl, seeds, truncation_psi):
    """Render one image per seed and save it into the current run directory.

    Each seed fully determines both the latent z and the per-layer synthesis
    noise, so output is reproducible per seed.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [v for k, v in Gs.components.synthesis.vars.items() if k.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    total = len(seeds)
    for idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' % (seed, idx, total))
        state = np.random.RandomState(seed)
        z = state.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        # Pin the synthesis noise inputs from the same RNG stream.
        noise_values = {v: state.randn(*v.shape.as_list()) for v in noise_vars}
        tflib.set_vars(noise_values)  # [height, width]
        images = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]
        out_path = dnnlib.make_run_dir_path('seed%04d.png' % seed)
        PIL.Image.fromarray(images[0], 'RGB').save(out_path)
示例#24
0
def generate_ws(network_pkl, seeds, truncation_psi):
    """Compute (optionally truncated) W latents for each seed and save them
    all to 'W_<len(seeds)>.npy'.

    Fixes: the original applied `w_avg + (w - w_avg) * truncation_psi`
    unconditionally, crashing when truncation_psi is None (the None guard
    only configured an unused Gs_kwargs dict, which has been removed).
    The output array shape is now taken from the model's actual W output
    instead of being hard-coded to (18, 512).
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    w_avg = Gs.get_var('dlatent_avg')  # [component]

    Ws = None  # allocated after the first mapping run so layer/component dims match the model

    for seed_idx, seed in enumerate(seeds):
        print('Generating W for seed %d (%d/%d) ...' % (seed, seed_idx+1, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:])  # [1, component]
        # Seed the synthesis noise too, keeping parity with the image-generating variants.
        tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        start = time.time()
        w = Gs.components.mapping.run(z, None)  # [1, layer, component]
        if truncation_psi is not None:
            # Truncation trick: pull W toward the average latent.
            w = w_avg + (w - w_avg) * truncation_psi
        print('Time: {}s'.format(time.time() - start))
        if Ws is None:
            Ws = np.zeros((len(seeds),) + w.shape[1:])
        Ws[seed_idx, :, :] = w

    if Ws is None:
        Ws = np.zeros((0, 18, 512))  # no seeds: preserve the original empty-array shape

    np.save('W_{}.npy'.format(len(seeds)), Ws)
示例#25
0
def vary_seeds(network_pkl, seeds, psi0, psi1, save_noise, q, count):
    """For each seed, render `count` images that are small random perturbations
    of the seed's latent, each saved with z/psi metadata.

    psi0, psi1: range from which a per-seed truncation psi is drawn uniformly.
    save_noise: if True, embed the full noise maps in the metadata; otherwise
        only the integer noise seed.
    q: perturbation scale — larger q means smaller steps away from the seed's z.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars_dict = {name:var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')}

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False

    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
        z2 = z
        noise_seed = rnd.randint(0,1000000)
        rnd_noise = np.random.RandomState(noise_seed)
        # One fixed noise realization per seed. Renamed from `vars` (shadowed
        # the builtin) and iterate .values() since the names are unused here.
        noise_values = {var: rnd_noise.randn(*var.shape.as_list()) for var in noise_vars_dict.values()}
        truncation_psi = psi0 + (psi1-psi0)*np.random.random()
        Gs_kwargs.truncation_psi = truncation_psi
        for i in range(count):
            tflib.set_vars(noise_values) # [height, width]
            images = Gs.run(z2, None, **Gs_kwargs) # [minibatch, height, width, channel]
            im = PIL.Image.fromarray(images[0], 'RGB')
            # NOTE(review): the filename depends only on `seed`, so all `count`
            # variants share one name — presumably save_with_metadata
            # de-duplicates or uniquifies; confirm against its definition.
            fn = 'vary_seed_%08d.jpg' % seed
            metadata = {'z': z2, 'truncation_psi': truncation_psi}
            if save_noise:
                metadata['noise_vars'] = {name: noise_values[var] for name, var in noise_vars_dict.items()}
            else:
                metadata['noise_seed'] = noise_seed
            save_with_metadata(im, fn, metadata, True)
            # Perturb the ORIGINAL z (not cumulative): each variant is an
            # independent step of scale ~1/q around the seed latent.
            z2 = (z + np.random.randn(*z.shape)/q) / (1 + 1/(q*q))
示例#26
0
def main2(seed):
    """Drive a StyleGAN latent-direction exploration pipeline for `seed`.

    NOTE(review): relies on module-level globals `result_dir` and `cof`, and
    on helpers (Generator, gen_img_with_18_512, create_order_npy,
    natural_sort, create_all) defined elsewhere — confirm they are in scope.
    """
    tflib.init_tf()
    #_G, _D, Gs = pickle.load(open("karras2019stylegan-ffhq-1024x1024.pkl", "rb"))
    # NOTE(review): "dummy" looks like a placeholder network path — verify intent.
    _G, _D, Gs = pretrained_networks.load_networks("dummy")
    generator = Generator(Gs, batch_size=1, randomize_noise=False)
    Gs.print_layers()
    rnd = np.random.RandomState(None)
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    # NOTE(review): synthesis_kwargs is built but never used below — dead local?
    synthesis_kwargs = dict(output_transform=fmt,
                            truncation_psi=0.7,
                            minibatch_size=8)
    # Generate the base (18, 512) W vectors for the given seed(s).
    vectors = gen_img_with_18_512(Gs, fmt, rnd, dst_seeds=seed)
    np.save(os.path.join(result_dir, 'test_changed/0-original.npy'), vectors)
    create_order_npy("0-original.npy", vectors)
    # load all directions
    # NOTE(review): hard-coded absolute Windows path — parameterize for portability.
    direction_vectors = "D:/Projects/training_datasets/emotions/style2/*.npy"
    dataset = glob.glob(direction_vectors)
    dataset = natural_sort(dataset)
    # Apply every direction vector to the base latents.
    for npy in dataset:
        print(npy)
        file_name = os.path.basename(npy)
        file_name_no_extension = os.path.splitext(file_name)[0]
        print(file_name_no_extension)
        #vectors = create_full(vectors, npy, file_name_no_extension, Gs, generator)
        create_all(vectors, npy, file_name_no_extension, Gs, generator, cof)
示例#27
0
def generate_images(network_pkl, seeds, truncation_psi, count=200):
    """Render `count` images, each a random linear blend between the FIRST
    seed's (z, noise) and a randomly chosen other seed's (z, noise).

    The first entry of `seeds` is the fixed endpoint; each image blends it
    with one shuffled partner at ratio r ~ U(0, 1). Filenames encode both
    seeds and r. `count` generalizes the previously hard-coded 200 images
    and defaults to the old value, so existing callers are unaffected.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    # Precompute (seed, z, per-layer noise) for every seed.
    L = []
    for seed in seeds:
        rnd = np.random.RandomState(seed)
        z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
        noise_values = {var: rnd.randn(*var.shape.as_list()) for var in noise_vars}
        L.append((seed, z, noise_values))
    L0 = L[0]   # fixed endpoint
    L = L[1:]   # candidate partners

    for _ in range(count):
        random.shuffle(L)
        L1 = L[0]   # random partner for this image
        r = random.random()
        z = r*L0[1] + (1-r)*L1[1]
        # Blend the noise maps with the same ratio as the latents (renamed
        # from `vars`, which shadowed the builtin).
        blended = {var: r*L0[2][var] + (1-r)*L1[2][var] for var in noise_vars}
        tflib.set_vars(blended) # [height, width]
        images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
        path = 'seed{:010d}-{:010d}-{:03d}.png'.format(L0[0], L1[0], int(r*1000))
        print(path)
        PIL.Image.fromarray(images[0], 'RGB').save(dnnlib.make_run_dir_path(path))
示例#28
0
def generate_images(network_pkl, seeds, truncation_psi, label=None, output_dir=None):
    """Render one (optionally class-conditional) image per seed.

    label: optional conditioning vector, reshaped to [1, label_dim].
    output_dir: destination folder; defaults to the current run directory.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [v for k, v in Gs.components.synthesis.vars.items() if k.startswith('noise')]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    if label is not None:
        label = label.reshape([1, -1])  # conditioning expects a [minibatch, dim] layout

    output_dir = dnnlib.make_run_dir_path() if output_dir is None else output_dir
    os.makedirs(output_dir, exist_ok=True)

    total = len(seeds)
    for idx, seed in enumerate(seeds):
        print('[%d/%d] Generating image for seed %d, labels = [%s]...' %
              (idx, total, seed, _label2str(label, ',')))
        state = np.random.RandomState(seed)
        z = state.randn(1, *Gs.input_shape[1:])  # [minibatch, component]
        # Seed the per-layer noise from the same RNG stream.
        tflib.set_vars({v: state.randn(*v.shape.as_list()) for v in noise_vars})  # [height, width]
        images = Gs.run(z, label, **Gs_kwargs)  # [minibatch, height, width, channel]
        target = os.path.join(output_dir, '%s_seed%04d.png' % (_label2str(label), seed))
        PIL.Image.fromarray(images[0], 'RGB').save(target)
示例#29
0
def generate_images(network_pkl, num, truncation_psi=0.5):
    """Generate `num` images from fresh random latents, dumping each latent
    code as text under results/generate_codes/ and each image under the run
    directory's results/ subfolder.

    Uses the global NumPy RNG, so output is not reproducible across runs.
    Fix: both output directories are now created up front — the original
    crashed with FileNotFoundError when they did not already exist.
    """
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [
        var for name, var in Gs.components.synthesis.vars.items()
        if name.startswith('noise')
    ]

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation_psi is not None:
        Gs_kwargs.truncation_psi = truncation_psi

    # Ensure the latent-code dump directory exists before writing to it.
    os.makedirs('results/generate_codes', exist_ok=True)

    for i in range(num):
        print('Generating image %d/%d ...' % (i, num))

        # Generate random latent
        z = np.random.randn(1, *Gs.input_shape[1:])  # [minibatch, component]

        # Save latent
        txt_filename = 'results/generate_codes/' + str(i).zfill(4) + '.txt'
        with open(txt_filename, 'w') as f:
            text_save(f, z)

        # Generate image
        tflib.set_vars(
            {var: np.random.randn(*var.shape.as_list())
             for var in noise_vars})  # [height, width]
        images = Gs.run(z, None,
                        **Gs_kwargs)  # [minibatch, height, width, channel]

        # Save image — also create the run-dir 'results' subfolder if missing.
        img_path = dnnlib.make_run_dir_path('results/' + str(i) + '.png')
        os.makedirs(os.path.dirname(img_path), exist_ok=True)
        PIL.Image.fromarray(images[0], 'RGB').save(img_path)
示例#30
0
def project(src, dst, iters):
    """Project every image in the source directory into StyleGAN latent space,
    optionally rendering a per-image progress video.

    NOTE(review): sets args.src / args.dst / args.num_steps, but the code
    below reads args.src_dir / args.dst_dir — unless Arguments aliases these
    attributes, the src/dst passed in are silently ignored. Confirm against
    the Arguments class definition.
    """
    # NOTE(review): NUM_ITERS is declared global but never read or assigned here.
    global NUM_ITERS
    args = Arguments()
    args.src = src
    args.dst = dst
    args.num_steps = iters

    print('Loading networks from "%s"...' % args.network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(args.network_pkl)
    proj = projector.Projector(
        vgg16_pkl=args.vgg16_pkl,
        num_steps=args.num_steps,
        initial_learning_rate=args.initial_learning_rate,
        initial_noise_factor=args.initial_noise_factor,
        verbose=args.verbose)
    proj.set_network(Gs)

    # Skip hidden/underscore-prefixed files; process inputs in sorted order.
    src_files = sorted([
        os.path.join(args.src_dir, f) for f in os.listdir(args.src_dir)
        if f[0] not in '._'
    ])
    for src_file in src_files:
        project_image(proj,
                      src_file,
                      args.dst_dir,
                      args.tmp_dir,
                      video=args.video)
        if args.video:
            render_video(src_file, args.dst_dir, args.tmp_dir, args.num_steps,
                         args.video_mode, args.video_size, args.video_fps,
                         args.video_codec, args.video_bitrate)
        # NOTE(review): assumes project_image recreates args.tmp_dir on each
        # iteration, since it is removed here every pass — confirm.
        shutil.rmtree(args.tmp_dir)