def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
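    # Renders a latent-space interpolation ("lerp") video: latents for every
    # frame are sampled up front, temporally smoothed with a Gaussian filter,
    # and renormalized to unit RMS before being decoded by the generator.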
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2) # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def generate_fake_images(run_id,
                         snapshot=None,
                         grid_size=[1, 1],
                         num_pngs=1,
                         image_shrink=1,
                         png_prefix=None,
                         random_seed=1000,
                         minibatch_size=8):
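    # Samples random latents and saves one PNG grid per iteration.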
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)  # was missing; misc.random_latents() below needs it

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(num_pngs):
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        latents = misc.random_latents(np.prod(grid_size),
                                      Gs,
                                      random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8)
        misc.save_image_grid(
            images,
            os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)),
            [0, 255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #4
def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
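    # Variant of generate_interpolation_video() that pins every latent
    # component to a constant and lets only dimension `interpolate_dim`
    # follow the smoothed random walk, isolating its visual effect.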

    interpolate_dim = 500  # the single latent dimension allowed to vary

    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp' + '-interpolate-dim-' + str(interpolate_dim) + '.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    ## shape=[1800, 1, 512]; grid_size=[1, 1]; Gs.input_shape=[None, 512]
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    latent_dims = Gs.input_shape[1]  # latent size (512 for this network)
    for frame in range(num_frames):
        # Pin everything except `interpolate_dim` to -0.5.
        all_latents[frame, 0, :interpolate_dim] = -0.5
        all_latents[frame, 0, interpolate_dim + 1:latent_dims] = -0.5

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2) # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc + '-interpolate-dim-' + str(interpolate_dim))
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)

    # Dump the latents for inspection, one frame per line.
    with open(os.path.join(result_subdir, 'latents_interpolate_dim_' + str(interpolate_dim) + '.txt'), 'w') as fp:
        for frame in range(num_frames):
            for latent_dim in range(latent_dims):
                fp.write(str(all_latents[frame][0][latent_dim]) + ', ')
            fp.write('\n')

    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def generate_fake_images_cond2(run_id, x, y, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=1):
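    # Outpainting demo for a patch-conditional generator: tiles loaded from
    # cond4/ seed the edges of a large canvas, then a sliding 256x256 window
    # repeatedly asks the generator to fill in the missing quadrant.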
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    training_set = dataset.load_dataset(data_dir=config.data_dir, shuffle_mb=0, verbose=True, **config.dataset)
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)
    real, label = training_set.get_minibatch_np(num_pngs)
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    size = 128
    grid = np.zeros((3, x * 256, y * 256))

    # Load the conditioning tiles as CHW float32 arrays.
    image = []
    for i in range(1, 18):
        filename = 'cond4/' + str(i) + '.png'
        if os.path.isfile(filename):
            im = Image.open(filename)
            im.load()
            im = np.asarray(im, dtype=np.float32)
            im = np.transpose(im, (2, 0, 1))  # HWC => CHW
            image.append(im)
            print(image[-1].shape)
    print(len(image))

    # Seed the first column and first row of the canvas with the tiles.
    for i in range(1, x):
        grid[:, i * 256:i * 256 + 256, 0:256] = image[i]
    for j in range(i, 16):
        grid[:, 0:256, (j - i) * 256:(j - i) * 256 + 256] = image[j]

    # Slide a 256x256 window across the canvas in 128-pixel steps. The three
    # known quadrants condition the generator, which fills the fourth.
    for j in range(128, y * 256 - 128, 128):
        for i in range(128, x * 256 - 128, 128):
            real = grid[:, i:i + 256, j:j + 256]
            real1 = real[:, :size, :size]  # top-left quadrant
            real2 = real[:, size:, :size]  # bottom-left quadrant
            real3 = real[:, :size, size:]  # top-right quadrant
            real1 = (real1.astype(np.float32) - 127.5) / 127.5
            real2 = (real2.astype(np.float32) - 127.5) / 127.5
            real3 = (real3.astype(np.float32) - 127.5) / 127.5
            latents = np.random.randn(3, 128, 128)
            left = np.concatenate((real1, real2), axis=1)
            right = np.concatenate((real3, latents), axis=1)
            lat_and_cond = np.concatenate((left, right), axis=2)
            lat_and_cond = lat_and_cond[np.newaxis]
            fake_images_out_small = Gs.get_output_for(lat_and_cond, is_training=False)
            fake_images_out_small = (fake_images_out_small.eval() * 127.5) + 127.5
            fake_images_out_small = fake_images_out_small[0, :, :, :]
            grid[:, i + 128:i + 256, j + 128:j + 256] = fake_images_out_small

    images = grid[np.newaxis]
    misc.save_image_grid(images, os.path.join(result_subdir, 'grid.png'), [0, 255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #6
def fit_real_images(run_id,
                    snapshot=None,
                    num_pngs=1,
                    image_shrink=1,
                    png_prefix=None,
                    random_seed=1000,
                    minibatch_size=8):
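    # Projects real images into the latent space: a trainable latent is
    # optimized with Adam (lr 0.1 -> 0.01 -> 0.001, 500 steps each) to
    # minimize the L2 distance between G(latent) and each target image, and
    # target/reconstruction pairs are saved side by side.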
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)
    latent = tf.get_variable('latent', shape=(1, 512), trainable=True)
    label = tf.get_variable('label', shape=(1, 0), trainable=True)
    images = Gs.fit(latent,
                    label,
                    minibatch_size=minibatch_size,
                    num_gpus=config.num_gpus)
    sess = tf.get_default_session()

    target = tf.placeholder(tf.float32, name='target')
    lr = tf.placeholder(tf.float32, name='lr')
    #loss = tf.reduce_sum(tf.abs(images[0][0] - target))
    loss = tf.nn.l2_loss(images[0][0] - target)
    with tf.variable_scope('adam'):
        opt = tf.train.AdamOptimizer(lr).minimize(loss, var_list=latent)

    sess.run(tf.variables_initializer([latent, label]))
    sess.run(tf.variables_initializer(tf.global_variables('adam')))

    # real_path = '/vol/phoebe/3DMD_SCIENCE_MUSEUM/Colour_UV_maps'
    # real_path = '/home/baris/data/mein3d_600x600'
    real_path = '/media/gen/pca_alone'
    save_path = '/media/gen/gan-pca'
    #target_im = PIL.Image.open('/media/logs-nvidia/002-fake-images-0/000-pgan-mein3d_tf-preset-v2-2gpus-fp32-VERBOSE-HIST-network-final-000001.png')

    for ind, real in enumerate(myutil.files(real_path)):
        target_im = myutil.crop_im(
            PIL.Image.open(os.path.join(real_path, real)))
        for j in [0.1, 0.01, 0.001]:
            for i in range(500):
                l2, _ = sess.run([loss, opt], {
                    target: myutil.rgb2tf(target_im),
                    lr: j
                })
                if i % 100 == 0:
                    print(l2)

        myutil.concat_image(np.asarray(target_im),
                            myutil.tf2rgb(sess.run(images))).save(
                                os.path.join(save_path, real))

    sess.close()
Example #7
def generate_fake_interpolate_midle_images(run_id,
                                           snapshot=None,
                                           grid_size=[1, 1],
                                           num_pngs=1,
                                           image_shrink=1,
                                           png_prefix=None,
                                           random_seed=1000,
                                           minibatch_size=8,
                                           middle_img=10):
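    # Saves strips of images interpolated linearly between two random
    # endpoint latents, sampling alphas in [-0.5, 0.5] around the first one.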
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(num_pngs):
        latents = misc.random_latents(middle_img + 2,
                                      Gs,
                                      random_state=random_state)
        from_to_tensor = latents[middle_img + 1] - latents[0]
        from_z = latents[0]
        # Walk along the line between the two endpoint latents.
        counter = 0
        for alpha in np.linspace(-0.5, 0.5, middle_img + 2):
            print('alpha: ', alpha, 'counter= ', counter)
            between_z = from_z + alpha * from_to_tensor
            latents[counter] = between_z
            counter += 1
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8)
        grid_size_1 = [middle_img + 1, 1]

        misc.save_image_grid(
            images[1:, :, :, :],
            os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)),
            [0, 255], grid_size_1)
Example #8
def find_latent_with_query_image(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=4123, minibatch_size=8):
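    # GAN inversion: recovers a latent z whose generated image G(z) matches a
    # fixed query image by gradient descent on an absolute-difference loss.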
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    
    # Create query image - tensorflow constant
    query_image = cv2.imread('../../data/ACDC/training/patient001/cardiac_cycles/0/0.png')
    query_image = cv2.resize(query_image, (256, 256))
    print('Saving query image to "%s"...' % result_subdir)
    cv2.imwrite(result_subdir+'/query_image.png', query_image)
    query_image = query_image.transpose(2,0,1)
    query_image = query_image[np.newaxis]
    x = tf.constant(query_image, dtype=tf.float32, name='query_image')
    # Create G(z) - tensorflow variable and label
    latent = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
    initial = tf.constant(latent, dtype=tf.float32)
    z = tf.Variable(initial_value=initial, dtype=tf.float32, name='latent_space')
    label = np.zeros([latent.shape[0], 5], np.float32)
    label[:,4] = 1 # | 0 -> NOR | 1 -> DCM | 2 -> HCM | 3 -> MINF | 4 -> RV | 
    # Build G(z) symbolically so gradients can flow back into z; Gs.run()
    # returns a NumPy array, which would leave the loss disconnected from z.
    gz = Gs.get_output_for(z, tf.constant(label), is_training=False)
    gz = gz * 127.5 + 127.5  # map generator output to the 0-255 image range
    # Define a loss function
    residual_loss = tf.losses.absolute_difference(x, gz)
    # Define an optimizer that updates only the latent
    train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(residual_loss, var_list=[z])
    
    zs, gzs, step = [], [], 1
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        _, loss_value = sess.run([train_op, residual_loss])
        while (loss_value > 2e-04 and step <= 50000):
            _, loss_value = sess.run([train_op, residual_loss])
            step += 1
            if step % 10000 == 0:
                print('Step {}, Loss value: {}'.format(step, loss_value))
                gzs.append(sess.run(gz))
                zs.append(sess.run(z))
                
    for png_idx, image in enumerate(gzs):
        misc.save_image_grid(image, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size)
        
    np.save(result_subdir+'/zs.npy', np.asarray(zs))
def generate_fake_images(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8):
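    # Standard unconditional sampling loop: one random latent grid per PNG.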
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(num_pngs):
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        latents = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
def generate_test_image_with_corresponding_z(run_id,
                                             snapshot=None,
                                             grid_size=[1, 1],
                                             image_shrink=1,
                                             image_zoom=1,
                                             num_frames=None,
                                             random_seed=1000,
                                             minibatch_size=8):
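    # Saves each generated test image together with the latent vector z that
    # produced it (as z_<idx>.npy), so images can be regenerated later.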
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if num_frames is None:
        num_frames = 4
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)
             ] + Gs.input_shape[1:]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    #all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Generate images.
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for idx, latent in enumerate(all_latents):
        labels = np.zeros([latent.shape[0], 0], np.float32)
        images = Gs.run(latent,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2,
                                                                   0)  # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1],
                                      order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)  # grayscale => RGB
        np.save(os.path.join(result_subdir, 'z_' + str(idx)), latent)
        filename = 'im_' + str(idx) + '.png'
        scipy.misc.imsave(os.path.join(result_subdir, filename), grid)
def generate_fake_images_cond(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=1):
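    # Quadrant-conditional sampling: three quadrants of a real training image
    # condition the generator, which synthesizes the missing bottom-right one.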
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)
    training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.dataset)
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)
    real, label = training_set.get_minibatch_np(num_pngs)
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    size = 128

    for png_idx in range(num_pngs):
        # Cut the real image into 128x128 quadrants and rescale to [-1, 1].
        real1 = real[png_idx, :, :size, :size]  # top-left
        real2 = real[png_idx, :, size:, :size]  # bottom-left
        real3 = real[png_idx, :, :size, size:]  # top-right
        real1 = (real1.astype(np.float32) - 127.5) / 127.5
        real2 = (real2.astype(np.float32) - 127.5) / 127.5
        real3 = (real3.astype(np.float32) - 127.5) / 127.5
        latents = np.random.randn(3, 128, 128)
        left = np.concatenate((real1, real2), axis=1)
        right = np.concatenate((real3, latents), axis=1)
        lat_and_cond = np.concatenate((left, right), axis=2)
        lat_and_cond = lat_and_cond[np.newaxis]
        fake_images_out_small = Gs.get_output_for(lat_and_cond, is_training=False)
        fake_images_out_small = (fake_images_out_small.eval() * 127.5) + 127.5
        fake_images_out_small = fake_images_out_small[0, :, :, :]

        # Map the conditioning quadrants back to [0, 255] and stitch the
        # 2x2 mosaic together for saving.
        real1 = (real1.astype(np.float32) * 127.5) + 127.5
        real2 = (real2.astype(np.float32) * 127.5) + 127.5
        real3 = (real3.astype(np.float32) * 127.5) + 127.5
        fake_image_out_right = np.concatenate((real3, fake_images_out_small), axis=1)
        fake_image_out_left = np.concatenate((real1, real2), axis=1)
        images = np.concatenate((fake_image_out_left, fake_image_out_right), axis=2)
        images = images[np.newaxis]
        misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0, 255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #12
def generate_fake_images(run_id,
                         snapshot=None,
                         grid_size=[1, 1],
                         num_pngs=1,
                         image_shrink=1,
                         subdir=None,
                         random_seed=1000,
                         minibatch_size=8):
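    # Like generate_fake_images() above, but writes into results/images/<subdir>,
    # disables per-layer noise randomization, and saves each latent as .npy
    # next to its PNG so images can be reproduced or keyframed later.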
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if subdir is None:
        subdir = misc.get_id_string_for_network_pkl(network_pkl)
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = "results/images/" + subdir
    if not os.path.exists(result_subdir):
        os.makedirs(result_subdir)
    for png_idx in range(num_pngs):
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        latents = random_latents(np.prod(grid_size),
                                 Gs,
                                 random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=1,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8,
                        randomize_noise=False)
        misc.save_image_grid(
            images, os.path.join(result_subdir, '%06d.png' % (png_idx)),
            [0, 255], grid_size)
        np.save(result_subdir + "/" + '%06d' % (png_idx), latents)
Example #13
def get_generator(run_id, snapshot=None, image_shrink=1, minibatch_size=8):
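    # Returns (images, latent, sess): a generator output tensor wired to a
    # trainable latent variable, ready for optimization-based fitting.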
    network_pkl = misc.locate_network_pkl(run_id, snapshot)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)
    latent = tf.get_variable('latent', shape=(1, 512), trainable=True)
    label = tf.get_variable('label',
                            shape=(1, 0),
                            trainable=True,
                            initializer=tf.zeros_initializer)
    images = Gs.fit(latent,
                    label,
                    minibatch_size=minibatch_size,
                    num_gpus=config.num_gpus,
                    out_mul=0.5,
                    out_add=0.5,
                    out_shrink=image_shrink,
                    out_dtype=np.float32)
    sess = tf.get_default_session()

    sess.run(tf.variables_initializer([latent, label]))

    return images, latent, sess
Example #14
def find_dir_latent_with_query_image(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=4123, minibatch_size=8, dir_path='../../data/ACDC/latents/cleaned_testing/'):
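    # Batch GAN inversion over a directory tree of cardiac-cycle frames:
    # fits a latent to every frame and saves the reconstruction, latent, and
    # label, mirroring the input folder structure.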
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    replicate_folder_structure(dir_path, result_subdir+'/')

    train_patients = sorted_nicely(glob.glob(dir_path+'*'))

    for patient in train_patients:
        cardiac_cycles = sorted_nicely(glob.glob(patient+'/*/*/*.png'))
        cfg = open(patient+'/Info.cfg')
        label = condition_to_onehot(cfg.readlines()[2][7:])
        cont = 0
        for cycle in cardiac_cycles:
            # Get folder containing the image
            supfolder = sup_folder(cycle)
            latent_subir = result_subdir + '/' + supfolder

            # Create query image - tensorflow constant
            query_image = cv2.imread(cycle) # read frame
            query_image = cv2.resize(query_image, (256, 256))
            query_image = query_image.transpose(2,0,1)
            query_image = query_image[np.newaxis]
            x = tf.constant(query_image, dtype=tf.float32, name='query_image')

            # Create G(z) - tensorflow variable and label
            latent = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
            initial = tf.constant(latent, dtype=tf.float32)
            z = tf.Variable(initial_value=initial, dtype=tf.float32, name='latent_space')
            # Build G(z) symbolically so gradients can flow back into z;
            # Gs.run() would return a NumPy array detached from z.
            gz = Gs.get_output_for(z, tf.constant(label, dtype=tf.float32), is_training=False)
            gz = gz * 127.5 + 127.5  # map generator output to the 0-255 range

            # Define a loss function
            residual_loss = tf.losses.absolute_difference(x, gz)
            # Define an optimizer that updates only the latent
            train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(residual_loss, var_list=[z])

            zs, gzs, step = [], [], 1
    
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                _, loss_value = sess.run([train_op, residual_loss])
                while (loss_value > 2e-04 and step <= 5000):
                    _, loss_value = sess.run([train_op, residual_loss])
                    step += 1
                    if step % 1000 == 0:
                        print('Step {}, Loss value: {}'.format(step, loss_value))
                        gzs.append(sess.run(gz))
                        zs.append(sess.run(z))
            
            # save last image
            print('Image saved at {}'.format(os.path.join(latent_subir, '%s.png' % (cont))))
            misc.save_image_grid(gzs[-1], os.path.join(latent_subir, '%02d.png' % (cont)), [0,255], grid_size)
            print('Latent vectors saved at {}'.format(os.path.join(latent_subir, 'latent_%02d.npy' % (cont))))
            np.save(os.path.join(latent_subir, 'latent_%02d.npy' % (cont)), zs[-1])
            print('Labels saved at {}'.format(os.path.join(latent_subir, 'label_%02d.npy' % (cont))))
            np.save(os.path.join(latent_subir, 'label_%02d.npy' % (cont)), label)
            cont+=1

        cfg.close()
        cont = 0
Example #15
def generate_interpolation_images(run_id,
                                  snapshot=None,
                                  grid_size=[1, 1],
                                  image_shrink=1,
                                  image_zoom=1,
                                  duration_sec=60.0,
                                  smoothing_sec=1.0,
                                  mp4=None,
                                  mp4_fps=30,
                                  mp4_codec='libx265',
                                  mp4_bitrate='16M',
                                  random_seed=1000,
                                  minibatch_size=8):
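    # Despite the video-style arguments, this renders the smoothed latent walk
    # as 3D assets: the 9-channel generator output is split into texture, UV
    # shape, and normals, and each frame is exported as a textured mesh.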

    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)
             ] + Gs.input_shape[1:]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(
        all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape),
        mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    lsfm_model = m3io.import_lsfm_model(
        '/home/baris/Projects/faceganhd/models/all_all_all.mat')
    lsfm_tcoords = mio.import_pickle(
        '/home/baris/Projects/team members/stelios/UV_spaces_V2/UV_dicts/full_face/512_UV_dict.pkl')['tcoords']
    lsfm_params = []
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(int(num_frames / minibatch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' %
              (png_idx * minibatch_size,
               (png_idx + 1) * minibatch_size, num_frames),
              end='')
        latents = all_latents[png_idx * minibatch_size:(png_idx + 1) *
                              minibatch_size, 0]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_shrink=image_shrink)
        for i in range(minibatch_size):
            texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
            mesh_raw = from_UV_2_3D(Image(images[i, 3:6]))
            normals = images[i, 6:9]
            normals_norm = (normals - normals.min()) / (normals.max() -
                                                        normals.min())
            mesh = lsfm_model.reconstruct(mesh_raw)
            lsfm_params.append(lsfm_model.project(mesh_raw))
            t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points, texture,
                                     mesh.trilist)
            m3io.export_textured_mesh(
                t_mesh,
                os.path.join(result_subdir,
                             '%06d.obj' % (png_idx * minibatch_size + i)),
                texture_extension='.png')
            mio.export_image(
                Image(normals_norm),
                os.path.join(result_subdir,
                             '%06d_nor.png' % (png_idx * minibatch_size + i)))
        print('%0.2f seconds' % (time.time() - start))
    mio.export_pickle(lsfm_params,
                      os.path.join(result_subdir, 'lsfm_params.pkl'))
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #16
def generate_fake_images(run_id,
                         snapshot=None,
                         grid_size=[1, 1],
                         batch_size=8,
                         num_pngs=1,
                         image_shrink=1,
                         png_prefix=None,
                         random_seed=1000,
                         minibatch_size=8):
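    # Batch sampler whose export path depends on the generator's channel
    # count: 3 channels => pickled image, 6 => shape pickle + texture PNG,
    # 9 => textured LSFM mesh plus normal and shape maps.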
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    lsfm_model = m3io.import_lsfm_model(
        '/home/baris/Projects/faceganhd/models/all_all_all.mat')
    lsfm_tcoords = mio.import_pickle(
        '/home/baris/Projects/team members/stelios/UV_spaces_V2/UV_dicts/full_face/512_UV_dict.pkl')['tcoords']
    lsfm_params = []

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(int(num_pngs / batch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' %
              (png_idx * batch_size, (png_idx + 1) * batch_size, num_pngs),
              end='')
        latents = misc.random_latents(np.prod(grid_size) * batch_size,
                                      Gs,
                                      random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_shrink=image_shrink)
        for i in range(batch_size):
            if images.shape[1] == 3:
                mio.export_pickle(
                    images[i],
                    os.path.join(
                        result_subdir,
                        '%s%06d.pkl' % (png_prefix, png_idx * batch_size + i)))
                # misc.save_image(images[i], os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx*batch_size+i)), [0,255], grid_size)
            elif images.shape[1] == 6:
                mio.export_pickle(images[i][3:6],
                                  os.path.join(
                                      result_subdir, '%s%06d.pkl' %
                                      (png_prefix, png_idx * batch_size + i)),
                                  overwrite=True)
                misc.save_image(
                    images[i][0:3],
                    os.path.join(
                        result_subdir,
                        '%s%06d.png' % (png_prefix, png_idx * batch_size + i)),
                    [-1, 1], grid_size)
            elif images.shape[1] == 9:
                texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
                mesh_raw = from_UV_2_3D(Image(images[i, 3:6]))
                normals = images[i, 6:9]
                normals_norm = (normals - normals.min()) / (normals.max() -
                                                            normals.min())
                mesh = lsfm_model.reconstruct(mesh_raw)
                lsfm_params.append(lsfm_model.project(mesh_raw))
                t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points,
                                         texture, mesh.trilist)
                # Index by batch_size (not minibatch_size) to match the
                # filenames used in the other branches.
                m3io.export_textured_mesh(
                    t_mesh,
                    os.path.join(result_subdir,
                                 '%06d.obj' % (png_idx * batch_size + i)),
                    texture_extension='.png')
                mio.export_image(
                    Image(normals_norm),
                    os.path.join(
                        result_subdir,
                        '%06d_nor.png' % (png_idx * batch_size + i)))
                shape = images[i, 3:6]
                shape_norm = (shape - shape.min()) / (shape.max() -
                                                      shape.min())
                mio.export_image(
                    Image(shape_norm),
                    os.path.join(
                        result_subdir,
                        '%06d_shp.png' % (png_idx * batch_size + i)))
                mio.export_pickle(
                    t_mesh,
                    os.path.join(result_subdir,
                                 '%06d.pkl' % (png_idx * batch_size + i)))

        print('%0.2f seconds' % (time.time() - start))

    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #17
def generate_keyframed_video(run_id,
                             latents_idx,
                             subdir=None,
                             snapshot=None,
                             grid_size=[1, 1],
                             image_shrink=1,
                             image_zoom=1,
                             transition_frames=120,
                             smoothing_sec=1.0,
                             mp4=None,
                             mp4_fps=30,
                             mp4_codec='libx265',
                             mp4_bitrate='16M',
                             random_seed=1000,
                             minibatch_size=8):
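    # Builds a looping video from saved keyframe latents (.npy files written
    # by the image generator above), easing between consecutive keyframes
    # with a cosine ("sine interpolation") schedule.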
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    if subdir is None:
        subdir = misc.get_id_string_for_network_pkl(network_pkl)
    keyframe_dir = "results/images/" + subdir
    result_subdir = "results/videos/" + subdir
    if not os.path.exists(result_subdir):
        os.makedirs(result_subdir)

    # Default output: a numbered AVI alongside any existing videos.
    if mp4 is None:
        count = len(glob.glob(result_subdir + "/*.avi"))
        mp4 = str(count) + '-video.avi'

    files = [f for f in glob.glob(keyframe_dir + "/*.npy", recursive=True)]

    for f in files:
        print(f)

    latents = list(map(lambda idx: np.load(files[idx]), latents_idx))
    print('len(latents)', len(latents))

    num_frames = transition_frames * len(latents)
    duration_sec = num_frames / mp4_fps

    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))

        section = frame_idx // transition_frames

        start = latents[section]
        end = latents[(section + 1) % len(latents)]

        transition_i = frame_idx - section * transition_frames
        maxindex = transition_frames - 1.0
        # mu1 = min(max(0, (transition_i*1.0/maxindex) ), 1)                             # linear interpolation
        # mu1 = min(max(0, (transition_i*1.0/maxindex)*(transition_i*1.0/maxindex) ), 1) # quadratic interpolation
        mu1 = min(max(0, 1 - math.cos(math.pi * transition_i / maxindex)),
                  2) / 2  # sine interpolation
        lat = np.multiply(start, 1.0 - mu1) + np.multiply(end, mu1)
        labels = np.zeros([lat.shape[0], 0], np.float32)
        images = Gs.run(lat,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=1,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8,
                        randomize_noise=False)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2,
                                                                   0)  # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1],
                                      order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor  # pip install moviepy
    moviepy.editor.VideoClip(make_frame,
                             duration=duration_sec).write_videofile(
                                 os.path.join(result_subdir, mp4),
                                 fps=mp4_fps,
                                 codec='png',
                                 bitrate=mp4_bitrate)
    with open(os.path.join(result_subdir, mp4 + '-keyframes.txt'),
              'w') as file:
        file.write(str(latents_idx))
Example #18
    def __init__(self, api, save_dir):
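        # Loads a pretrained generator (run 14) and configures credentials for
        # several commercial face-analysis APIs; `api` selects which service
        # scores the generated faces, and results are appended to a CSV log.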
        ########################
        # INITIALIZE MODEL:
        ########################
       
        misc.init_output_logging()
        numpy.random.seed(config.random_seed)
        print('Initializing TensorFlow...')
        os.environ.update(config.env)
        tfutil.init_tf(config.tf_config)
        #-----------------
        network_pkl = misc.locate_network_pkl(14, None)
        print('Loading network from "%s"...' % network_pkl)
        self.G, self.D, self.Gs = misc.load_network_pkl(14, None)    
        self.random_state = numpy.random.RandomState()

        ########################
        # INITIALIZE API INFORMATION:
        ########################
        # Azure storage information
        self.table_account_name = ''        # NEEDS TO BE COMPLETED WITH AZURE ACCOUNT
        self.table_account_key = ''         # NEEDS TO BE COMPLETED WITH AZURE ACCOUNT
        self.block_blob_service = BlockBlobService(self.table_account_name, self.table_account_key) 
        self.https_prefix = ''              # NEEDS TO BE COMPLETED WITH AZURE ACCOUNT
        self.table_service = TableService(account_name=self.table_account_name, account_key=self.table_account_key)
        self.container_name = ''            # NEEDS TO BE COMPLETED WITH AZURE ACCOUNT

        # Microsoft face detection 
        self.msft_face_detection_key = ''  # NEEDS TO BE COMPLETED WITH MSFT API ACCOUNT
        self.msft_face_detection_url = ''  # NEEDS TO BE COMPLETED WITH MSFT API ACCOUNT
        self.msft_face_attributes = 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
        self.msft_headers = {'Ocp-Apim-Subscription-Key': self.msft_face_detection_key}
        self.msft_params = {
            'returnFaceId': 'true',
            'returnFaceLandmarks': 'false',
            'returnFaceAttributes': self.msft_face_attributes
        }

        # FacePlusPlus face detection 
        self.faceplusplus_http_url = 'https://api-us.faceplusplus.com/facepp/v3/detect'
        self.faceplusplus_key = ""          # NEEDS TO BE COMPLETED WITH FACE++ API ACCOUNT
        self.faceplusplus_secret = ""       # NEEDS TO BE COMPLETED WITH FACE++ API ACCOUNT

        # IBM Watson face detection
        self.IBM_visual_recognition = VisualRecognitionV3(
            version='2018-03-19',
            iam_api_key='',                 # NEEDS TO BE COMPLETED WITH IBM API ACCOUNT
            url = 'https://gateway.watsonplatform.net/visual-recognition/api'
        )
        
        # Amazon AWS Rekognition face detection:
        self.amazon_face_detection_id = ''  # NEEDS TO BE COMPLETED WITH AMAZON API ACCOUNT
        self.amazon_face_detection_key = '' # NEEDS TO BE COMPLETED WITH AMAZON API ACCOUNT
        self.amazon_client = boto3.client('rekognition','us-east-1',aws_access_key_id=self.amazon_face_detection_id,aws_secret_access_key=self.amazon_face_detection_key)

        # SightEngine:
        self.SEclient = SightengineClient('', '')   # NEEDS TO BE COMPLETED WITH SE API ACCOUNT

        ########################
        # SET WHICH FACE API TO USE:
        ########################
        self.faceAPI = api  # or "FacePlusePlus" or "IBM" or "Google" or "Amazon" or "SE"

        self.save_dir = save_dir+'\\images'
        self.raw_results = save_dir+'\\raw_results.txt'

        with open(self.raw_results, 'a') as f:
            f.write("ImageLocation,Race_Int,Gender_Int,Race,Gender,Face_Detected,Gender_Prediction,Gender_Correct\n")

        self.image_count=0
Example #19
def generate_fake_images(run_id,
                         snapshot=None,
                         grid_size=[1, 1],
                         num_pngs=1,
                         image_shrink=1,
                         png_prefix=None,
                         random_seed=1000,
                         minibatch_size=8):
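    # Conditional sampler for a text/category-conditioned GAN: each PNG is
    # driven by a latent, a category label, and a title embedding drawn from
    # a 50k-product index, any of which can be held constant via the flags.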

    # Toggle which conditioning inputs stay fixed across all generated PNGs.
    embeddings_constant = False
    labels_constant = False
    latents_constant = False

    idx = random.randint(0, 56880)
    df = pandas.read_csv('datasets/50k_sorted_tf/50k_index_sorted.csv')
    print('embeddings_constant : ' + str(embeddings_constant))
    print('labels_constant : ' + str(labels_constant))
    print('latents_constant : ' + str(latents_constant))

    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir + '/' + run_id,
                                              config.desc)

    if latents_constant:
        latents = misc.random_latents(np.prod(grid_size),
                                      Gs,
                                      random_state=None)
    #embeddings = np.zeros([1, 300], dtype=np.float32)
    #labels = np.zeros([1, 32], dtype=np.float32)
    embeddings = np.load(
        'datasets/50k_sorted_tf/sum_embedding_title.embeddings')
    embeddings = embeddings.astype('float32')

    labels = np.load(
        'datasets/50k_sorted_tf/sum_embedding_category_average.labels')
    labels = labels.astype('float32')
    name1 = ''
    if labels_constant:
        label = labels[idx]
        name1 = name1 + ' ' + df.at[idx, 'category1']
        label = label.reshape(1, label.shape[0])

    if embeddings_constant:
        embedding = embeddings[idx]
        title = df.at[idx, 'title']
        name1 = name1 + ' ' + title[:10]
        embedding = embedding.reshape(1, embedding.shape[0])

    #print(latents.shape)
    for png_idx in range(num_pngs):
        name = ''
        name = name + name1
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        rand = random.randint(0, 56880)
        #rand = png_idx * 1810
        #labels = sess.run(classes[0])
        if not latents_constant:
            latents = misc.random_latents(np.prod(grid_size),
                                          Gs,
                                          random_state=random_state)
        if not labels_constant:
            label = labels[rand]
            label = label.reshape(1, label.shape[0])
            name = name + ' ' + df.at[rand, 'category1']
        if not embeddings_constant:
            embedding = embeddings[rand]
            title = df.at[rand, 'title']
            name = name + ' ' + title[:10]
            embedding = embedding.reshape(1, embedding.shape[0])

        #print(labels.shape)
        images = Gs.run(latents,
                        label,
                        embedding,
                        minibatch_size=minibatch_size,
                        num_gpus=config.num_gpus,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8)
        misc.save_image_grid(
            images, os.path.join(result_subdir,
                                 '%s%06d.png' % (name, png_idx)), [0, 255],
            grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #20
def generate_fake_images(run_id,
                         snapshot=None,
                         grid_size=[1, 1],
                         batch_size=8,
                         num_pngs=1,
                         image_shrink=1,
                         png_prefix=None,
                         random_seed=1000,
                         minibatch_size=8):
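    # Variant of the multi-channel batch sampler above that uses the
    # config_test module and 7-dimensional labels; 9-channel outputs are
    # split into shape and normal pickles plus a texture PNG.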
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config_test.result_dir,
                                              config_test.desc)
    for png_idx in range(int(num_pngs / batch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' %
              (png_idx * batch_size, (png_idx + 1) * batch_size, num_pngs),
              end='')
        latents = misc.random_latents(np.prod(grid_size) * batch_size,
                                      Gs,
                                      random_state=random_state)
        labels = np.zeros([latents.shape[0], 7], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=config_test.num_gpus,
                        out_shrink=image_shrink)
        for i in range(batch_size):
            if images.shape[1] == 3:
                mio.export_pickle(
                    images[i],
                    os.path.join(
                        result_subdir,
                        '%s%06d.pkl' % (png_prefix, png_idx * batch_size + i)))
                # misc.save_image(images[i], os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx*batch_size+i)), [0,255], grid_size)
            elif images.shape[1] == 6:
                mio.export_pickle(images[i][3:6],
                                  os.path.join(
                                      result_subdir, '%s%06d.pkl' %
                                      (png_prefix, png_idx * batch_size + i)),
                                  overwrite=True)
                misc.save_image(
                    images[i][0:3],
                    os.path.join(
                        result_subdir,
                        '%s%06d.png' % (png_prefix, png_idx * batch_size + i)),
                    [-1, 1], grid_size)
            elif images.shape[1] == 9:
                mio.export_pickle(images[i][3:6],
                                  os.path.join(
                                      result_subdir, '%s%06d_shp.pkl' %
                                      (png_prefix, png_idx * batch_size + i)),
                                  overwrite=True)
                mio.export_pickle(images[i][6:9],
                                  os.path.join(
                                      result_subdir, '%s%06d_nor.pkl' %
                                      (png_prefix, png_idx * batch_size + i)),
                                  overwrite=True)
                misc.save_image(
                    images[i][0:3],
                    os.path.join(
                        result_subdir,
                        '%s%06d.png' % (png_prefix, png_idx * batch_size + i)),
                    [-1, 1], grid_size)
        print('%0.2f seconds' % (time.time() - start))

    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
Example #21
def generate_interpolation_images(run_id,
                                  snapshot=None,
                                  grid_size=[1, 1],
                                  image_shrink=1,
                                  image_zoom=1,
                                  duration_sec=60.0,
                                  smoothing_sec=1.0,
                                  mp4=None,
                                  mp4_fps=30,
                                  mp4_codec='libx265',
                                  mp4_bitrate='16M',
                                  random_seed=1000,
                                  minibatch_size=8):
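    # Mesh-exporting latent walk for a label-conditioned model: extra latent
    # components are passed through a scaled softmax to form the 7 labels,
    # the UV shape channels are Gaussian-smoothed, and each frame is exported
    # as a textured mesh with normals.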

    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + [
        Gs.input_shape[1:][0] + Gs.input_shapes[1][1:][0]
    ]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(
        all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape),
        mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Scaling factors for the 7 softmax labels: 10 10 10 10 5 3 10.
    # model = mio.import_pickle('../models/lsfm_shape_model_fw.pkl')
    # facesoft_model = mio.import_pickle('../models/facesoft_id_and_exp_3d_face_model.pkl')['shape_model']
    # lsfm_model = m3io.import_lsfm_model('/home/baris/Projects/faceganhd/models/all_all_all.mat')
    # model_mean = lsfm_model.mean().copy()
    # mask = mio.import_pickle('../UV_spaces_V2/mask_full_2_crop.pkl')
    lsfm_tcoords = mio.import_pickle('512_UV_dict.pkl')['tcoords']
    lsfm_params = []
    result_subdir = misc.create_result_subdir(config_test.result_dir,
                                              config_test.desc)
    for png_idx in range(int(num_frames / minibatch_size)):
        start = time.time()
        print('Generating png %d-%d / %d... in ' %
              (png_idx * minibatch_size,
               (png_idx + 1) * minibatch_size, num_frames),
              end='')
        latents = all_latents[png_idx * minibatch_size:(png_idx + 1) *
                              minibatch_size, 0, :Gs.input_shape[1:][0]]
        labels = all_latents[png_idx * minibatch_size:(png_idx + 1) *
                             minibatch_size, 0, Gs.input_shape[1:][0]:]
        labels_softmax = softmax(labels) * np.array([10, 10, 10, 10, 5, 3, 10])
        images = Gs.run(latents,
                        labels_softmax,
                        minibatch_size=minibatch_size,
                        num_gpus=config_test.num_gpus,
                        out_shrink=image_shrink)
        for i in range(minibatch_size):
            texture = Image(np.clip(images[i, 0:3] / 2 + 0.5, 0, 1))
            img_shape = ndimage.gaussian_filter(images[i, 3:6],
                                                sigma=(0, 3, 3),
                                                order=0)
            mesh_raw = from_UV_2_3D(Image(img_shape),
                                    topology='full',
                                    uv_layout='oval')
            # model_mean.points[mask,:] = mesh_raw.points
            normals = images[i, 6:9]
            normals_norm = (normals - normals.min()) / (normals.max() -
                                                        normals.min())
            mesh = mesh_raw  #facesoft_model.reconstruct(model_mean).from_mask(mask)
            # lsfm_params.append(lsfm_model.project(mesh_raw))
            t_mesh = TexturedTriMesh(mesh.points, lsfm_tcoords.points, texture,
                                     mesh.trilist)
            m3io.export_textured_mesh(
                t_mesh,
                os.path.join(result_subdir,
                             '%06d.obj' % (png_idx * minibatch_size + i)),
                texture_extension='.png')
            fix_obj(
                os.path.join(result_subdir,
                             '%06d.obj' % (png_idx * minibatch_size + i)))
            mio.export_image(
                Image(normals_norm),
                os.path.join(result_subdir,
                             '%06d_nor.png' % (png_idx * minibatch_size + i)))
        print('%0.2f seconds' % (time.time() - start))
    mio.export_pickle(lsfm_params,
                      os.path.join(result_subdir, 'lsfm_params.pkl'))
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()