# Example #1
def main(_):
    """Interpolate between two random latent vectors for one ImageNet
    category and save the two endpoint images plus every interpolated
    sample into --out_dir.
    """
    # create out_dir
    tf.io.gfile.mkdir(flags.FLAGS.out_dir)

    # disable eager execution (TF1 graph mode: build graph, run in Session)
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.disable_eager_execution()

    # restore cli parameters
    category2indx = {v: k for k, v in indx2category.items()}
    name = flags.FLAGS.category
    category = category2indx[name]

    # create target category (batch of one label)
    target = tf.fill([1], category)

    with tf.compat.v1.variable_scope('Generator',
                                     reuse=tf.compat.v1.AUTO_REUSE):
        # run once so the generator's variables exist before Saver.restore
        latent = tf.random.truncated_normal(shape=(1, 128))
        feed(latent, target, training=True)

    def _generate_and_save(latent_vec, filename):
        # Generate one image for latent_vec, rescale [-1, 1] -> [0, 255]
        # uint8 and write it to out_dir/filename.  Extracted helper: this
        # exact pattern was duplicated three times in the original body.
        img, _ = feed(latent_vec, target)
        plot_img = change_range(img[0].eval(), -1, 1, 0,
                                255).astype(np.uint8)
        save_img_cv2(plot_img, os.path.join(flags.FLAGS.out_dir, filename))

    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
            allow_soft_placement=True)) as sess:
        with tf.compat.v1.variable_scope('Generator',
                                         reuse=tf.compat.v1.AUTO_REUSE):
            # run build
            init_op = tf.compat.v1.global_variables_initializer()
            sess.run(init_op)

            # run restore
            saver = tf.compat.v1.train.Saver()
            saver.restore(sess, flags.FLAGS.pretrained_path)

            # endpoints drawn from a truncated normal, then interpolated
            latent_1 = truncnorm.rvs(-1, 1, size=(1, 128)).astype(np.float32)
            latent_2 = truncnorm.rvs(-1, 1, size=(1, 128)).astype(np.float32)
            latent_vars = interpolate_points(latent_1, latent_2)

            _generate_and_save(latent_1, 'latent_1.png')
            _generate_and_save(latent_2, 'latent_2.png')

            for i, latent in enumerate(latent_vars):
                _generate_and_save(latent, f'{i}.png')
def main(_):
    """Generate num_bs batches of bs random samples for one ImageNet
    category and save each image as '<category>_<index>.png' in --out_dir.
    """
    # create out_dir
    tf.io.gfile.mkdir(flags.FLAGS.out_dir)

    # disable eager execution (TF1 graph mode: build graph, run in Session)
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.disable_eager_execution()

    # restore cli parameters
    category2indx = {v: k for k, v in indx2category.items()}
    bs = flags.FLAGS.bs
    num_batches = flags.FLAGS.num_bs
    name = flags.FLAGS.category
    category = category2indx[name]

    # create target category (one label per batch element)
    target = tf.fill([bs], category)

    with tf.compat.v1.variable_scope('Generator',
                                     reuse=tf.compat.v1.AUTO_REUSE):
        # run once so the generator's variables exist before Saver.restore
        latent = tf.random.truncated_normal(shape=(bs, 128))
        feed(latent, target, training=True)

    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
            allow_soft_placement=True)) as sess:
        with tf.compat.v1.variable_scope('Generator',
                                         reuse=tf.compat.v1.AUTO_REUSE):
            # run build
            init_op = tf.compat.v1.global_variables_initializer()
            sess.run(init_op)

            # run restore
            saver = tf.compat.v1.train.Saver()
            saver.restore(sess, flags.FLAGS.pretrained_path)

            with tf.device('/GPU:0'):
                # each sess.run of `img` draws a fresh batch of latents
                latent = tf.random.truncated_normal(shape=(bs, 128))
                img, _ = feed(latent, target)

            indx = 0
            for _ in range(num_batches):
                # BUG FIX: the original discarded the sess.run(img) result
                # and then called img[j].eval() per image, which re-ran the
                # whole graph (re-sampling the random latents) once per
                # image -- so the bs "batch" images came from bs different
                # batches and the graph executed bs + 1 times per batch.
                # Evaluate once and index the resulting numpy array.
                batch = sess.run(img)
                for j in range(bs):
                    indx += 1
                    plot_img = change_range(batch[j], -1, 1, 0,
                                            255).astype(np.uint8)
                    save_img_cv2(
                        plot_img,
                        os.path.join(flags.FLAGS.out_dir,
                                     f'{name}_{indx}.png'))
# Example #3
parser.add_argument('--bs', type=int, default=6)

if __name__ == '__main__':
    args = parser.parse_args()
    # parameterize config
    config['experiment_name'] = args.experiment_name
    config['ema'] = args.ema
    config['weights_root'] = args.weights_root
    config['smyrf'] = False
    config['return_attn_map'] = True

    biggan = Biggan(config)
    biggan.load_pretrained()

    # Random sampling
    category2indx = {val: key for key, val in indx2category.items()}
    if args.imagenet_category is not None:
        category = category2indx[args.imagenet_category]
    else:
        category = None

    generator_inputs = biggan.get_random_inputs(bs=args.bs,
                                                target=category,
                                                seed=args.seed)
    out, attn_map, _ = biggan.sample(generator_inputs, return_attn_map=True)

    print('Computing singular values...')

    fig = plt.figure()
    fig.suptitle(
        f'Singular values of BigGAN\'s attention map (after softmax) for {args.bs} randomly generated images.'
def main(_):
    """Invert the generator: optimize a trainable latent vector so the
    generated image matches a target image (either a freshly generated
    sample, or an image loaded from --image_path), using a
    discriminator-based loss, then save the target and reconstructions
    to --out_dir.
    """
    # create output dir if it does not exist
    tf.io.gfile.mkdir(flags.FLAGS.out_dir)
    ''' Main function '''
    # TF1 graph mode: build the graph first, then run inside a Session.
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.disable_eager_execution()
    category2indx = {v: k for k, v in indx2category.items()}

    name = flags.FLAGS.category
    indx = category2indx[name]

    # fix seeds so the initial latent / noise draws are reproducible
    np.random.seed(flags.FLAGS.seed)
    tf.compat.v2.random.set_seed(flags.FLAGS.seed)
    latent = np.random.normal(size=(flags.FLAGS.bs, 128)).astype(np.float32)

    # one class label per batch element
    target = tf.fill([flags.FLAGS.bs], indx)

    with tf.device('/GPU:0'):
        opt = tf.compat.v1.train.AdamOptimizer(flags.FLAGS.lr)

    # Build generator and discriminator once (training=True) so that all
    # of their variables exist before the initializer / Saver below.
    with tf.compat.v1.variable_scope('Generator',
                                     reuse=tf.compat.v1.AUTO_REUSE):
        img, _, = feed(latent, target, training=True)

    with tf.compat.v1.variable_scope('Discriminator',
                                     reuse=tf.compat.v1.AUTO_REUSE):
        feed_disc(img, target)

    with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
            allow_soft_placement=True)) as sess:
        # initialize everything, then overwrite with pretrained weights
        init_op = tf.compat.v1.global_variables_initializer()
        sess.run(init_op)
        saver = tf.compat.v1.train.Saver()
        saver.restore(sess, flags.FLAGS.pretrained_path)

        if not flags.FLAGS.image_path:
            # No target image given: generate one from the seeded latent.
            with tf.compat.v1.variable_scope('Generator',
                                             reuse=tf.compat.v1.AUTO_REUSE):
                img, g_attn_map = feed(latent, target)
                if flags.FLAGS.inject_noise:
                    # optionally corrupt the target with Gaussian noise
                    img += tf.random.normal([flags.FLAGS.bs, 128, 128, 3],
                                            stddev=flags.FLAGS.std,
                                            mean=0)
        else:
            # Load the target image, map [0, 255] -> [-1, 1], resize to
            # the generator's 128x128 resolution, add a batch dimension.
            img = change_range(plt.imread(flags.FLAGS.image_path), 0, 255, -1,
                               1).astype(np.float32)
            img = tf.image.resize(img, (128, 128)).eval()
            img = np.expand_dims(img, axis=0)
            if flags.FLAGS.inject_noise:
                # NOTE(review): adding [bs, ...] noise to a [1, ...] image
                # broadcasts it to bs copies *before* the np.repeat below,
                # so the repeat likely yields bs*bs images -- confirm this
                # is intended when --inject_noise is combined with
                # --image_path.
                img += tf.random.normal([flags.FLAGS.bs, 128, 128, 3],
                                        stddev=flags.FLAGS.std,
                                        mean=0)
                img = img.eval()
            # replicate the single target across the batch
            img = np.repeat(img, flags.FLAGS.bs, axis=0)

        # Discriminator features / attention on the target image, used by
        # the loss below.
        with tf.compat.v1.variable_scope('Discriminator',
                                         reuse=tf.compat.v1.AUTO_REUSE):
            _, d_attn, d_attn_map = feed_disc(img, target)

        # `latent_` is the variable actually being optimized; `step` is a
        # non-trainable counter passed to the loss.
        step = tf.Variable(0, trainable=False)
        latent_ = tf.Variable(tf.random.truncated_normal(shape=(flags.FLAGS.bs,
                                                                128)),
                              trainable=True,
                              name='latent_')
        sess.run(tf.compat.v1.variables_initializer([latent_, step]))

        # Lookahead wrapper over the optimized latent (k=5, alpha=0.5)
        lookahead = BaseLookAhead([latent_], k=5, alpha=0.5)
        sess.run(lookahead.get_ops())

        loss_vars = [latent_, img, d_attn, d_attn_map, target, step]
        loss_value, img_loss = discriminator_loss(loss_vars)

        # Adam step on latent_ only; the optimizer's slot variables are
        # created by minimize(), so they are initialized afterwards.
        update_op = opt.minimize(loss_value, var_list=[latent_])
        sess.run(tf.compat.v1.variables_initializer(opt.variables()))

        for i in range(flags.FLAGS.steps):
            # NOTE(review): the two .eval() calls here trigger extra graph
            # executions per iteration purely for logging.
            print('{0}/{1}:\t Loss: {2:.4f}\t Image loss: {3:.4f}'.format(
                i, flags.FLAGS.steps, loss_value.eval(), img_loss.eval()))
            sess.run(update_op)

        # Re-generate from the optimized latent and convert to uint8.
        with tf.compat.v1.variable_scope('Generator',
                                         reuse=tf.compat.v1.AUTO_REUSE):
            inverse, g_attn_map_ = feed(latent_, target)
            inverse = inverse.eval()
            g_attn_map_ = g_attn_map_.eval()
            inverse = change_range(inverse, -1, 1, 0, 255).astype(np.uint8)

        if not flags.FLAGS.image_path:
            # in the generated-target branch `img` is still a tensor here
            img = img.eval()

        # Uncomment to visualize saliencies
        # plot_img = change_range(img, -1, 1, 0, 255).astype(np.uint8)[0]
        # visualize_attention(plot_img, g_attn_map_[0])

        img = change_range(img, -1, 1, 0, 255).astype(np.uint8)
    # Session is closed here; img/inverse are plain numpy arrays by now.
    save_img_cv2(
        img[0],
        os.path.join(flags.FLAGS.out_dir, f'real_{flags.FLAGS.seed}.png'))
    for i in range(flags.FLAGS.bs):
        save_img_cv2(
            inverse[i],
            os.path.join(flags.FLAGS.out_dir,
                         f'reconstructed_{flags.FLAGS.seed + i}.png'))