Example #1
def generate_images(self, step):
    # Sample from the fixed noise batch with the network in inference mode
    feed_dict = {self.noise: self.fixed_noise_128, self.is_training: False}
    samples = self.sess.run(self.fake_data, feed_dict=feed_dict)
    # Map generator output from [-1, 1] to pixel values in [0, 255]
    samples = ((samples + 1.) * 255. / 2.).astype('int32')
    task_dir = os.path.join(self.config.save_images_dir, self.exp_name)
    if not os.path.exists(task_dir):
        os.mkdir(task_dir)
    save_path = os.path.join(task_dir, 'images_{}.jpg'.format(step))
    save_images.save_images(samples.reshape((-1, 32, 32, 3)), save_path)
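The rescaling line maps generator outputs from the tanh range [-1, 1] to 8-bit pixel values; a standalone check of that arithmetic (NumPy only, toy data):

    import numpy as np

    samples = np.array([-1.0, 0.0, 1.0])                   # toy generator outputs
    pixels = ((samples + 1.) * 255. / 2.).astype('int32')
    print(pixels)                                          # [  0 127 255]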
Example #2
def generate_images(self, step):
    feed_dict = {self.noise: self.fixed_noise_128,
                 self.is_training: False}
    samples = self.sess.run(self.fake_data, feed_dict=feed_dict)
    checkpoint_dir = os.path.join(self.config.save_images_dir, self.exp_name)
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    save_path = os.path.join(checkpoint_dir, 'images_{}.jpg'.format(step))
    # NOTE: unlike Example #1, samples are saved without rescaling to [0, 255]
    save_images.save_images(samples.reshape((-1, 28, 28, 1)), save_path)
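In both examples the check-then-mkdir pattern is racy and fails when the parent directory does not exist; a common Python 3 replacement (standard library only):

    os.makedirs(checkpoint_dir, exist_ok=True)  # creates parents; no error if it already exists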
Example #3
def evaluate(self, step, action_list, trial=1):
    # Generate samples from the fixed noise batch and draw a real batch for comparison
    samples = self.generator(self.fixed_noise_128).numpy()
    checkpoint_dir = os.path.join('Results', self.config.data_task, self.exp_name, "Trial-{}".format(trial))
    X = self.train_dataset.sample_batch(self.config.vis_count)

    img_save_path = os.path.join(checkpoint_dir, 'output', 'images_{}.png'.format(step))
    kl_save_path = os.path.join(checkpoint_dir, 'kl')
    os.makedirs(os.path.join(checkpoint_dir, 'output'), exist_ok=True)
    os.makedirs(kl_save_path, exist_ok=True)

    save_images.save_images(samples.reshape((-1, 28, 28, 1)), img_save_path)

    # Score the generated samples against the real batch
    inception_score = get_inception_score(samples.reshape((-1, 28, 28, 1)))[0].numpy()
    fid = get_fid(X.reshape((-1, 28, 28, 1)), samples.reshape((-1, 28, 28, 1)))[0].numpy()

    self.inception_score.append(inception_score)
    self.fid.append(fid)
    kl_data = {"INCEPTION_SCORE": self.inception_score, "FID": self.fid}

    print('\nINCEPTION_SCORE', inception_score)
    print('\nFID', fid)
    pd.DataFrame.from_dict(kl_data).to_csv('{}/SCORES.csv'.format(kl_save_path), header=False, index=False)

    # Log the generator/discriminator action distribution, if any actions were recorded
    if len(action_list) > 0:
        action_list = np.array(action_list)
        action_data = {"G": action_list[:, 0], "D": action_list[:, 1]}
        pd.DataFrame.from_dict(action_data).to_csv('{}/action_distribution.csv'.format(kl_save_path), header=False, index=False)
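Because the CSVs are written with header=False, the column order is the only record of which score is which; the same pandas call with the header row kept (toy values):

    import pandas as pd

    kl_data = {"INCEPTION_SCORE": [2.1, 2.4], "FID": [48.3, 41.9]}   # toy values
    pd.DataFrame.from_dict(kl_data).to_csv('SCORES.csv', index=False)  # header row kept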
Example #4
def main(argv):

    if FLAGS.framework == 'gan': from frameworks.gan import GAN as Framework
    if FLAGS.framework == 'vae': from frameworks.vae import VAE as Framework
    if FLAGS.framework == 'avae': from frameworks.avae import AVAE as Framework
    if FLAGS.framework == 'bigan':
        from frameworks.bigan import BIGAN as Framework
    if FLAGS.framework == 'vaegan':
        from frameworks.vaegan import VAEGAN as Framework

    if FLAGS.dataset == 'svhn': from datasets import svhn as dataset
    if FLAGS.dataset == 'celeba': from datasets import celeba as dataset
    if FLAGS.dataset == 'bedroom': from datasets import bedroom as dataset
    if FLAGS.dataset == 'cifar10': from datasets import cifar10 as dataset
    if FLAGS.dataset == 'cifar100': from datasets import cifar100 as dataset
    if FLAGS.dataset == 'dsprites': from datasets import dsprites as dataset

    # Create working directories
    experiment_dir = os.path.join(FLAGS.output_dir, FLAGS.experiment_name,
                                  FLAGS.framework, FLAGS.dataset)

    checkpoints_dir = os.path.join(experiment_dir, 'checkpoints')
    saved_model_dir = os.path.join(experiment_dir, 'saved_models')
    images_dir = os.path.join(experiment_dir, 'images')
    test_dir = os.path.join(experiment_dir, 'test')
    os.makedirs(checkpoints_dir, exist_ok=True)
    os.makedirs(saved_model_dir, exist_ok=True)
    os.makedirs(images_dir, exist_ok=True)
    os.makedirs(test_dir, exist_ok=True)

    if FLAGS.mode == 'train':
        logging.get_absl_handler().use_absl_log_file('logs', experiment_dir)

    # Load dataset (rebinding `dataset` here shadows the imported module,
    # which is not referenced again)
    dataset, img_size = dataset.load(
        FLAGS.batch_size,
        mode='test' if FLAGS.mode == 'test_mse_lpips' else 'train')

    # Load framework
    framework = Framework(
        img_size=img_size,
        latent_dim=FLAGS.latent_dim,
        width_multiplier=FLAGS.width_multiplier,
        learning_rate=tf.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=FLAGS.initial_learning_rate,
            decay_steps=50000,
            decay_rate=FLAGS.decay_rate))

    # Manage checkpoints
    ckpt = tf.train.Checkpoint(step=tf.Variable(0),
                               **framework.models,
                               **framework.optimizers)
    manager = tf.train.CheckpointManager(ckpt,
                                         checkpoints_dir,
                                         max_to_keep=1)

    # Restore checkpoint
    if FLAGS.restore: ckpt.restore(manager.latest_checkpoint)

    # ================================ TESTING =====================================

    if 'test' in FLAGS.mode:
        # load models
        for name, model in framework.models.items():
            # NOTE: the checkpoint step (50001) is hard-coded here
            model.load_weights(
                os.path.join(saved_model_dir,
                             '{}_{:06d}.h5'.format(name, 50001)))

        # --------------------------- LPIPS & MSE ------------------------------

        if FLAGS.mode == 'test_mse_lpips':
            if FLAGS.framework in ['avae', 'vae', 'bigan', 'vaegan']:
                from utils.lpips import lpips

                N = 1000
                N_batch = (N + FLAGS.batch_size - 1) // FLAGS.batch_size  # NOTE: unused; the loop below runs over the full test set

                lpips_value = 0.0
                mse_value = 0.0
                for i, features in dataset.enumerate():
                    i = i.numpy()
                    _, images = framework.eval_step(features['image'])
                    if FLAGS.framework == 'avae':
                        x_real, _, x_fake = tf.split(images, 3)
                    if FLAGS.framework == 'bigan':
                        x_fake, x_real, _ = tf.split(images, 3)
                    if FLAGS.framework == 'vae':
                        x_real, x_fake = tf.split(images, 2)
                    if FLAGS.framework == 'vaegan':
                        x_real, x_fake = tf.split(images, 2)
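                    # Streaming means: each update below applies
                    # m_k = m_{k-1} + (x_k - m_{k-1}) / (k + 1),
                    # so no per-batch values need to be stored.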
                    lpips_value += 1.0 / (i + 1.0) * (tf.reduce_mean(
                        lpips.lpips(x_real, x_fake)).numpy() - lpips_value)
                    mse_value += 1.0 / (i + 1.0) * (tf.reduce_mean(
                        tf.math.square(x_fake - x_real)).numpy() - mse_value)

            else:
                lpips_value, mse_value = -1, -1
            fid_value = -1
        # ------------------------------- FID ----------------------------------

        if FLAGS.mode == 'test_fid':
            from utils.frechet_inception_distance import FrechetInceptionDistance

            N = 50000
            N_batch = (N + FLAGS.batch_size - 1) // FLAGS.batch_size
            fid = FrechetInceptionDistance(dataset, N)
            gen_images = []
            i = 0
            for _ in range(N_batch):
                i += 1
                print(i)
                images = framework.generate(FLAGS.batch_size)
                gen_images.append(images)
            fid_value = fid(gen_images)
            mse_value, lpips_value = -1, -1

        # ---------------------------- Write results ---------------------------

        output_file = os.path.join(test_dir, FLAGS.mode + '.csv')
        with open(output_file, 'w') as f:
            f.write('mse,lpips,fid\n')
            f.write('{:<5.2f},{:<5.2f},{:<5.2f}'.format(
                mse_value, lpips_value, fid_value))


# ================================ TRAINING ====================================

    if FLAGS.mode == 'train':
        for step, features in dataset.enumerate(FLAGS.initial_step):
            framework.train_step(features['image'])

            if step % FLAGS.eval_freq == 0:
                logging_message, images = framework.eval_step(
                    features['image'])
                save_images(
                    np.array(images),
                    os.path.join(images_dir, 'image_{}.png'.format(step)))
                logging.info('step: {:06d} - '.format(step) + logging_message)

            if step % FLAGS.save_freq == 0 and step != 0:
                manager.save()

            ckpt.step.assign_add(1)
            if step == FLAGS.final_step + 1: break

        # Save model
        for name, model in framework.models.items():
            model.save_weights(
                os.path.join(saved_model_dir,
                             '{}_{:06d}.h5'.format(name, step)))
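The `+= 1.0 / (i + 1.0) * (x - m)` updates in the LPIPS/MSE branch above are the standard streaming-mean recurrence; a quick NumPy sanity check of the identity (standalone, toy data):

    import numpy as np

    xs = np.random.rand(7)
    m = 0.0
    for i, x in enumerate(xs):
        m += 1.0 / (i + 1.0) * (x - m)
    assert np.isclose(m, xs.mean())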
Example #5
def latent_traversal():

    experiment_dir = os.path.join(FLAGS.output_dir,
                                  '{}'.format(FLAGS.transform),
                                  '{}'.format(FLAGS.y))

    # =============================== LOAD MODEL ===================================

    model_name = 'biggan{}_{}'.format('-deep' if FLAGS.deep else '',
                                      FLAGS.image_size)

    model = MODELS[model_name].BigGan(sn=True)
    model.build([(1, z_dim), (1, n_cat)])
    model.load_weights(os.path.join('models', 'biggan_imagenet', 'weights',
                                    '{}.h5'.format(model_name)),
                       by_name=True)

    # ======================== DEFINE COMPUTATION GRAPH ============================

    z = tf.Variable(initial_value=tf.zeros([FLAGS.batch_size_III, z_dim]))
    c = tf.Variable(initial_value=np.tile(
        np.eye(1000)[[FLAGS.y]], [FLAGS.batch_size_III, 1]))
    alpha = tf.Variable(initial_value=tf.zeros([]))  # NOTE: unused; the np.linspace loop below rebinds `alpha` to a Python float

    @tf.function
    def project(u, v):
        # Removes the component of u along v (i.e. projects u onto the
        # orthogonal complement of v); assumes v has unit norm.
        dot = tf.linalg.tensordot(u, v, axes=[[-1], [-1]])
        dot = tf.reshape(dot, [-1, 1])
        u_proj_v = u - dot * tf.reshape(v, [1, -1])
        return u_proj_v

    @tf.function
    def generate_image():
        x = model([z, c], training=False)
        return x


# ========================= GENERATE LATENT TRAVERSAL ==========================

    os.makedirs(os.path.join(experiment_dir, 'latent_traversals'),
                exist_ok=True)

    # read the direction
    filename = os.path.join(experiment_dir, 'directions', 'direction.csv')
    with open(filename, 'r') as f:
        u = [float(e) for e in f.readline().split(', ')]
        u = tf.cast(u, tf.float32)

    for batch in range(FLAGS.num_traversals // FLAGS.batch_size_III):
        z_0 = tf.random.normal([FLAGS.batch_size_III, z_dim])
        for alpha in np.linspace(-5.0, 5.0, 11):
            z.assign(project(z_0, u) + alpha * u)
            x = generate_image()

            for i in range(FLAGS.batch_size_III):
                save_images(
                    x.numpy()[i:i + 1, :, :, :],
                    os.path.join(
                        experiment_dir, 'latent_traversals',
                        '{}_alpha={:2.1f}.png'.format(
                            batch * FLAGS.batch_size_III + i,
                            (alpha + 5) / 10)))
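Despite its name, `project` removes the component of `u` along `v` rather than projecting onto it; a NumPy check of that identity (assuming `v` is unit-norm, toy data):

    import numpy as np

    v = np.array([0.6, 0.8])            # unit-norm direction
    u = np.array([1.0, 2.0])
    u_perp = u - (u @ v) * v            # same operation as project() above
    assert np.isclose(u_perp @ v, 0.0)  # nothing left along v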
Example #6
def generate_trajectory():

    experiment_dir = os.path.join(FLAGS.output_dir,
                                  '{}'.format(FLAGS.transform),
                                  '{}'.format(FLAGS.y))

    # =============================== LOAD MODEL ===================================

    model_name = 'biggan{}_{}'.format('-deep' if FLAGS.deep else '',
                                      FLAGS.image_size)

    model = MODELS[model_name].BigGan(sn=True)
    model.build([(1, z_dim), (1, n_cat)])
    model.load_weights(os.path.join('models', 'biggan_imagenet', 'weights',
                                    '{}.h5'.format(model_name)),
                       by_name=True)

    if FLAGS.transform == 'horizontal_position':
        transform = transformations.translate_horizontally
    if FLAGS.transform == 'vertical_position':
        transform = transformations.translate_vertically
    if FLAGS.transform == 'scale':
        transform = transformations.zoom
    if FLAGS.transform == 'brightness':
        transform = transformations.change_brightness
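    # An equivalent lookup-based dispatch, which fails loudly on an unknown flag:
    #   transform = {
    #       'horizontal_position': transformations.translate_horizontally,
    #       'vertical_position': transformations.translate_vertically,
    #       'scale': transformations.zoom,
    #       'brightness': transformations.change_brightness,
    #   }[FLAGS.transform]  # KeyError here instead of a NameError later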

# ======================== DEFINE COMPUTATION GRAPH ============================

    z = tf.Variable(  # latent code
        initial_value=tf.zeros([FLAGS.batch_size, z_dim]))
    c = tf.Variable(  # one hot vector for category
        initial_value=np.tile(np.eye(1000)[[FLAGS.y]], [FLAGS.batch_size, 1]))
    T = tf.Variable(  # transformation
        initial_value=tf.zeros([]))
    optimizer = tf.optimizers.Adam(0.01)

    def renormalize(z):
        z.assign(tf.clip_by_norm(z, tf.math.sqrt(float(z_dim)), axes=[-1]))

    @tf.function
    def generate_image(z, c):
        x = model([z, c], training=False)
        return x

    @tf.function
    def initialize_optimizer():
        for variable in optimizer.variables():
            variable.assign(variable.initial_value)

    @tf.function
    def train_step(targets, masks, sigma):
        """Update the latent code `z` so that `G(z)` moves closer to `targets`."""
        with tf.GradientTape() as tape:
            x = generate_image(z, c)
            errors = reconstruction_errors.blurred_mse(x, targets, masks,
                                                       sigma)
            error = tf.reduce_mean(errors)
        gradients = tape.gradient(error, z)
        optimizer.apply_gradients(zip([gradients], [z]))
        return x, errors


# ============================= GENERATE TFRECORD ==============================

# create directory for the tfrecord files

    os.makedirs(os.path.join(experiment_dir, 'trajectories', 'images',
                             'generated'),
                exist_ok=True)
    os.makedirs(os.path.join(experiment_dir, 'trajectories', 'images',
                             'target'),
                exist_ok=True)

    # create tfrecord writer
    writer = tf.io.TFRecordWriter(
        os.path.join(experiment_dir, 'trajectories', 'tfrecord.tfrecord'))

    for batch in range(FLAGS.num_trajectories // FLAGS.batch_size):

        # Initialization
        z_0 = tf.random.normal([FLAGS.batch_size, z_dim])
        if FLAGS.renorm:
            z_0 = tf.clip_by_norm(z_0, tf.math.sqrt(float(z_dim)), axes=[-1])
        z.assign(z_0)
        x_0 = generate_image(z, c)

        for t_sign in [-1, 1]:
            z.assign(z_0)
            for t_mod in np.linspace(0.1, 0.5, 5):
                t = t_sign * t_mod
                T.assign(t)
                target, mask = transform(x_0, T)

                # Find z for the intermediate transformation
                initialize_optimizer()
                for step in range(FLAGS.n_steps):
                    x, errors = train_step(target, mask, FLAGS.sigma)
                    if FLAGS.renorm: renormalize(z)

                # Save images
                for i in range(FLAGS.batch_size):
                    example = tf.train.Example(features=tf.train.Features(
                        feature={
                            'z_0': float_list_feature(z_0[i]),
                            'z_t': float_list_feature(z[i]),
                            'y': int64_feature(FLAGS.y),
                            'error': float_feature(errors[i]),
                            'delta_t': float_feature(t),
                        }))
                    writer.write(example.SerializeToString())

                    save_images(
                        x.numpy()[i:i + 1, :, :, :],
                        os.path.join(
                            experiment_dir, 'trajectories', 'images',
                            'generated', '{}_t={:2.1f}.png'.format(
                                batch * FLAGS.batch_size + i, t + 0.5)))

                    save_images(
                        target.numpy()[i:i + 1, :, :, :],
                        os.path.join(
                            experiment_dir, 'trajectories', 'images', 'target',
                            '{}_t={:2.1f}.png'.format(
                                batch * FLAGS.batch_size + i, t + 0.5)))

    writer.close()
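`float_list_feature`, `float_feature`, and `int64_feature` are not defined in this snippet; they presumably follow the standard TFRecord helper idiom (a sketch using only the public `tf.train` protos):

    import numpy as np
    import tensorflow as tf

    def float_list_feature(values):
        # Flatten a tensor/array into a repeated-float feature
        return tf.train.Feature(
            float_list=tf.train.FloatList(value=np.asarray(values).reshape(-1)))

    def float_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[float(value)]))

    def int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[int(value)]))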
Example #7
def get_barycenter():

    experiment_dir = os.path.join(FLAGS.output_dir,
                                  '{}'.format(FLAGS.transform),
                                  '{}'.format(FLAGS.y))

    os.makedirs(os.path.join(experiment_dir, 'barycenters', 'saliency_maps'),
                exist_ok=True)

    # =============================== LOAD MODEL ===================================

    model = build_model()
    model.load_state_dict(
        torch.load(os.path.join('models', 'saliency', 'final.pth')))
    model.eval().cuda()

    # =============================== LOAD DATA ====================================

    class ImageDataTest(data.Dataset):
        def __init__(self):
            self.data_root = os.path.join(experiment_dir, 'latent_traversals')
            self.image_list = sorted(os.listdir(self.data_root))
            self.image_num = len(self.image_list)

        def __getitem__(self, item):
            path = os.path.join(self.data_root, self.image_list[item])
            image = cv2.imread(path)  # BGR, HWC layout
            image = np.array(image, dtype=np.float32)
            # Subtract per-channel BGR means (Caffe/VGG-style preprocessing)
            image = image - np.array((104.00699, 116.66877, 122.67892))
            image = image.transpose((2, 0, 1))  # HWC -> CHW
            image = torch.Tensor(image)
            return {
                'image': image,
                'name': self.image_list[item % self.image_num],
            }

        def __len__(self):
            return self.image_num

    data_loader = data.DataLoader(dataset=ImageDataTest(),
                                  batch_size=FLAGS.batch_size_IV,
                                  shuffle=False,
                                  num_workers=8,
                                  pin_memory=False)

    # ================================ RUN MODEL ===================================

    filename = os.path.join(experiment_dir, 'barycenters', 'barycenters.csv')
    with open(filename, 'w') as f:
        f.write('name,x,y,mean\n')
        for i, batch in enumerate(data_loader):
            images = batch['image']
            name = batch['name'][0]  # NOTE: only the first filename in the batch is used

            with torch.no_grad():
                images = Variable(images)  # legacy autograd.Variable wrapper; a no-op in modern PyTorch
                images = images.cuda()
                preds = model(images)
                pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())

                # Compute barycenter:
                x, y, mean = compute_barycenter(pred)
                template = '{},{:4.3f},{:4.3f},{:4.3f}\n'
                f.write(template.format(name, x, y, mean))

                pred = np.array([pred[:, :]])
                filename = os.path.join(experiment_dir, 'barycenters',
                                        'saliency_maps', name)
                save_images(pred, filename)
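`compute_barycenter` is not defined in this snippet. A plausible implementation, assuming it returns the intensity-weighted centroid of the saliency map plus its mean activation (hypothetical, for illustration only):

    import numpy as np

    def compute_barycenter(pred):
        # pred: 2-D saliency map with non-negative values
        h, w = pred.shape
        total = pred.sum() + 1e-8        # avoid division by zero
        ys, xs = np.mgrid[0:h, 0:w]
        x = (xs * pred).sum() / total    # weighted x coordinate (pixels)
        y = (ys * pred).sum() / total    # weighted y coordinate (pixels)
        return x, y, pred.mean()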