Code Example #1
File: util_scripts.py Project: JcYBalaBalA/GAN-Leaks
def generate_fake_images(model_path,
                         out_dir,
                         num_samples,
                         random_seed=1000,
                         image_shrink=1,
                         minibatch_size=32):
    random_state = np.random.RandomState(random_seed)

    network_pkl = model_path
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(network_pkl)

    latents = misc.random_latents(num_samples, Gs, random_state)
    labels = np.zeros([latents.shape[0], 0], np.float32)
    images = Gs.run(latents,
                    labels,
                    minibatch_size=minibatch_size,
                    num_gpus=config.num_gpus,
                    out_mul=127.5,
                    out_add=127.5,
                    out_shrink=image_shrink,
                    out_dtype=np.uint8)
    save_dir = misc.make_dir(out_dir)
    misc.save_image_grid(images[:100], os.path.join(save_dir, 'samples.png'),
                         [0, 255], [10, 10])

    img_r01 = images.astype(np.float32) / 255.
    img_r01 = img_r01.transpose(0, 2, 3, 1)  # NCHW => NHWC
    np.savez_compressed(os.path.join(save_dir, 'generated.npz'),
                        noise=latents,
                        img_r01=img_r01)
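
A minimal usage sketch for the function above. This is hedged: the snapshot path and sample count are placeholders, and the surrounding ProGAN-style misc/config modules must be importable.

# Hypothetical call; point model_path at a trained snapshot from your own run.
generate_fake_images(model_path='results/network-final.pkl',
                     out_dir='fake_samples',
                     num_samples=1000,
                     random_seed=1000)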
Code Example #2
def generate_fake_images(pkl_path,
                         out_dir,
                         num_pngs,
                         image_shrink=1,
                         random_seed=1000,
                         minibatch_size=1):
    random_state = np.random.RandomState(random_seed)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)

    print('Loading network...')
    G, D, Gs = misc.load_network_pkl(pkl_path)

    latents = misc.random_latents(num_pngs, Gs, random_state=random_state)
    labels = np.zeros([latents.shape[0], 0], np.float32)
    images = Gs.run(latents,
                    labels,
                    # note: the function's minibatch_size parameter is ignored here
                    minibatch_size=config.num_gpus * 256,
                    num_gpus=config.num_gpus,
                    out_mul=127.5,
                    out_add=127.5,
                    out_shrink=image_shrink,
                    out_dtype=np.uint8)
    for png_idx in range(num_pngs):
        print('Generating png to %s: %d / %d...' %
              (out_dir, png_idx, num_pngs),
              end='\r')
        if not os.path.exists(
                os.path.join(out_dir, 'ProGAN_%08d.png' % png_idx)):
            misc.save_image_grid(
                images[png_idx:png_idx + 1],
                os.path.join(out_dir, 'ProGAN_%08d.png' % png_idx), [0, 255],
                [1, 1])
    print()
Code Example #3
def main():
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    parser = argparse.ArgumentParser(
        description='converter to create pytorch models')
    #parser.add_argument('--id', type=int, required=True,
    #        help='number of model to convert')
    parser.add_argument('--id',
                        type=str,
                        required=True,
                        help='number of model to convert')
    parser.add_argument('--outdir', default=None)
    args = parser.parse_args()

    # Configuration
    snapshot = None  # Default, implies last snapshot

    # Get parameters from checkpoint
    tfutil.init_tf()
    directory = misc.locate_result_subdir(args.id)
    print('Loading snapshot from %s' % directory)
    G, D, Gs = misc.load_network_pkl(args.id, snapshot)
    print(G)
    print(D)
    print(Gs)

    # import pdb; pdb.set_trace()

    # model = from_tf_parameters(Gs.variables)
    model = from_tf_parameters(Gs.vars)
    if args.outdir is None:
        args.outdir = directory
    filename = os.path.join(args.outdir, 'generator.pth')
    print('Saving pytorch model as %s' % filename)
    torch.save(model.state_dict(), filename)
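
Assuming the script above is saved as convert.py (the filename is a placeholder), a typical invocation would be "python convert.py --id 14 --outdir ./converted".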
Code Example #4
def generate_fake_images_all(run_id,
                             out_dir,
                             num_pngs,
                             image_shrink=1,
                             random_seed=1000,
                             minibatch_size=1,
                             num_pkls=50):
    random_state = np.random.RandomState(random_seed)
    out_dir = os.path.join(out_dir, str(run_id))

    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    assert len(snapshot_pkls) >= 1

    for snapshot_idx, snapshot_pkl in enumerate(snapshot_pkls[:num_pkls]):
        prefix = 'network-snapshot-'
        postfix = '.pkl'
        snapshot_name = os.path.basename(snapshot_pkl)
        tmp_dir = os.path.join(out_dir, snapshot_name.split('.')[0])
        if not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)
        assert snapshot_name.startswith(prefix) and snapshot_name.endswith(
            postfix)
        snapshot_kimg = int(snapshot_name[len(prefix):-len(postfix)])

        print('Loading network...')
        G, D, Gs = misc.load_network_pkl(snapshot_pkl)

        latents = misc.random_latents(num_pngs, Gs, random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=config.num_gpus * 32,
                        num_gpus=config.num_gpus,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8)
        for png_idx in range(num_pngs):
            print('Generating png to %s: %d / %d...' %
                  (tmp_dir, png_idx, num_pngs),
                  end='\r')
            if not os.path.exists(
                    os.path.join(tmp_dir, 'ProGAN_%08d.png' % png_idx)):
                misc.save_image_grid(
                    images[png_idx:png_idx + 1],
                    os.path.join(tmp_dir, 'ProGAN_%08d.png' % png_idx),
                    [0, 255], [1, 1])
    print()
Code Example #5
# gc.collect()

args.decay_steps *= 0.01 * args.iterations  # Calculate steps as a percent of total iterations

os.makedirs(args.data_dir, exist_ok=True)
os.makedirs(args.mask_dir, exist_ok=True)
os.makedirs(args.generated_images_dir, exist_ok=True)
os.makedirs(args.dlatent_dir, exist_ok=True)
os.makedirs(args.dlabel_dir, exist_ok=True)

# Initialize generator and perceptual model

# load network
network_pkl = misc.locate_network_pkl(args.results_dir)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(args.results_dir, None)

# initiate random input
latents = misc.random_latents(1, Gs, random_state=np.random.RandomState(800))
labels = np.random.rand(1, args.labels_size)

generator = Generator(Gs,
                      labels_size=572,
                      batch_size=1,
                      clipping_threshold=args.clipping_threshold,
                      model_res=args.resolution)

perc_model = None
if args.use_lpips_loss > 1e-8:
    with open(args.load_perc_model, "rb") as f:
        perc_model = pickle.load(f)
Code Example #6
def test_discriminator(run_id, tensor, snapshot=None):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)
    scores, labels = fp32(D.get_output_for(tensor, is_training=False))
    return (scores, labels)
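
A usage sketch under stated assumptions: TensorFlow 1.x is initialized, the network was trained at 128x128, and the run ID and input shape below are placeholders.

import numpy as np
import tensorflow as tf

# Hypothetical input: one RGB image in NCHW layout, scaled to [-1, 1].
images = tf.constant(np.random.uniform(-1, 1, (1, 3, 128, 128)).astype(np.float32))
scores, labels = test_discriminator(run_id=14, tensor=images)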
Code Example #7
def recovery(name,
             pkl_path1,
             pkl_path2,
             out_dir,
             target_latents_dir,
             num_init=4,
             num_total_sample=50,
             minibatch_size=1,
             attack=None,
             denoising=False,
             param=None,
             loss_func='lpips'):

    print(name)
    print(f'num_init:{num_init}')
    print(f'num_sample:{num_total_sample}')
    print(f'loss_func:{loss_func}')

    # load source model
    print('Loading network1...' + pkl_path1)
    _, _, Gs = _misc.load_network_pkl(pkl_path1)

    # load target model
    print('Loading network2...' + pkl_path2)
    _, _, Gt = _misc.load_network_pkl(pkl_path2)

    proj = projector.Projector(loss_func=loss_func, crop_size=param)
    proj.set_network(Gs, minibatch_size=num_init)

    out_dir = os.path.join(out_dir, name)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
        z_init = []
        l2_log = []
        lpips_log = []
        latents_log = []
    else:
        z_init = np.load(os.path.join(out_dir, 'z_init.npy'))
        z_init = [z_init[i] for i in range(len(z_init))]
        l2_log = [
            np.load(os.path.join(out_dir, 'l2.npy'))[i]
            for i in range(len(z_init))
        ]
        lpips_log = [
            np.load(os.path.join(out_dir, 'lpips.npy'))[i]
            for i in range(len(z_init))
        ]
        latents_log = [
            np.load(os.path.join(out_dir, 'z_re.npy'))[i]
            for i in range(len(z_init))
        ]

    # load target z
    assert os.path.exists(target_latents_dir), 'latent_dir does not exist'
    print('using latents: ' + target_latents_dir)
    pre_latents = np.load(target_latents_dir)

    start_time = time.time()
    for k in range(len(z_init), num_total_sample):
        # sample target image
        latent = pre_latents[k]
        z_init.append(latent)

        latents = np.zeros((num_init, len(latent)), np.float32)
        latents[:] = latent
        labels = np.zeros([latents.shape[0], 0], np.float32)

        target_images = Gt.get_output_for(latents, labels, is_training=False)
        target_images = tfutil.run(target_images)

        # attack / denoise the target images
        if attack is not None:
            target_images = attack(target_images, param)
        if denoising:
            target_images = blur(target_images, 3)

        # recovery
        l2_dists = []
        lpips_dists = []
        learned_latents = []
        proj.start(target_images)
        while proj.get_cur_step() < proj.num_steps:
            l2_dists.append(proj.get_l2())
            lpips_dists.append(proj.get_lpips())
            learned_latents.append(proj.get_dlatents())
            proj.step()
        print('epoch: %d / %d ... %12f  %12f  (%.1fs elapsed)' %
              (k, num_total_sample, np.min(proj.get_l2()),
               np.min(proj.get_lpips()), time.time() - start_time))

        l2_log.append(l2_dists)
        lpips_log.append(lpips_dists)
        latents_log.append(learned_latents)

        np.save(out_dir + '/l2', np.array(l2_log))
        np.save(out_dir + '/lpips', np.array(lpips_log))
        np.save(out_dir + '/z_init', np.array(z_init))
        np.save(out_dir + '/z_re', np.array(latents_log))
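
A call sketch for this function, with loud assumptions: both pickle paths and the latent file are placeholders, and attack, if supplied, must be a callable of the form attack(images, param).

# Hypothetical invocation; all paths below are placeholders.
recovery(name='pgan_recovery',
         pkl_path1='models/source.pkl',
         pkl_path2='models/target.pkl',
         out_dir='recovery_results',
         target_latents_dir='latents/z_target.npy',
         num_init=4,
         num_total_sample=50,
         loss_func='lpips')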
Code Example #8
import os

import numpy as np
import tensorflow as tf

import configWrapper as config
import tfutil
import dataset
import misc
import util_scripts_wrapper as util

#----------------------------------------------------------------------------
# Main entry point.
# Calls the function indicated in config.py.

if __name__ == "__main__":
    misc.init_output_logging()
    np.random.seed(config.random_seed)
    print('Initializing TensorFlow...')
    os.environ.update(config.env)
    tfutil.init_tf(config.tf_config)
    #-----------------
    network_pkl = misc.locate_network_pkl(14, None)
    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(14, None)
    random_state = np.random.RandomState()

    print('Synthesizing images...')  # synthesize images and specify race and gender
    # util.generate_fake_images(Gs, D, random_state, <race>, <gender>, <number of images to generate>)
    util.generate_fake_images(Gs, D, random_state, '0', '0', 100)
#----------------------------------------------------------------------------
Code Example #9
latent_cnt = 512
img_size = 512
label_cnt = 33
step = 0.1
min = -5.0  # note: shadows the built-in min()
max = 5.0  # note: shadows the built-in max()

misc.init_output_logging()
np.random.seed(config.random_seed)
print('Initializing TensorFlow...')
os.environ.update(config.env)
tfutil.init_tf(config.tf_config)
print('Running %s()...' % config.train['func'])
network_pkl = misc.locate_network_pkl(run_id)
print('Loading network from "%s"...' % network_pkl)
G, D, Gs = misc.load_network_pkl(run_id, snapshot)
if not os.path.isdir("static/generated"):
    os.makedirs("static/generated")


@app.route("/", methods=['GET', 'POST'])
def main_page():
    file_name = "%s.png" % uuid.uuid4()
    path = os.path.join(os.path.dirname(__file__), "static/generated",
                        file_name)

    if 'is_random' not in request.args or request.args["is_random"] == "1":
        np.random.seed(int(time.time()))
        latents = np.random.randn(1, latent_cnt).astype(np.float32)
    else:
        latent_list = []
Code Example #10
File: test.py Project: Syou-MDL/GAN_Attribution
def recovery(name,
             pkl_path1,
             pkl_path2,
             out_dir,
             target_latents_dir,
             num_init=20,
             num_total_sample=100,
             image_shrink=1,
             random_seed=2020,
             minibatch_size=1,
             noise_sigma=0):
    #     misc.init_output_logging()
    #     np.random.seed(random_seed)
    #     print('Initializing TensorFlow...')
    #     os.environ.update(config.env)
    #     tfutil.init_tf(config.tf_config)

    print('num_init:' + str(num_init))

    # load source model
    print('Loading network1...' + pkl_path1)
    _, _, G_source = misc.load_network_pkl(pkl_path1)

    # load target model
    print('Loading network2...' + pkl_path2)
    _, _, G_target = misc.load_network_pkl(pkl_path2)

    # load Gt
    Gt = tfutil.Network('Gt',
                        num_samples=num_init,
                        num_channels=3,
                        resolution=128,
                        func='networks.G_recovery')
    latents = misc.random_latents(num_init, Gt, random_state=None)
    labels = np.zeros([latents.shape[0], 0], np.float32)
    Gt.copy_vars_from_with_input(G_target, latents)

    # load Gs
    Gs = tfutil.Network('Gs',
                        num_samples=num_init,
                        num_channels=3,
                        resolution=128,
                        func='networks.G_recovery')
    Gs.copy_vars_from_with_input(G_source, latents)

    out_dir = os.path.join(out_dir, name)
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    def G_loss(G, target_images):
        tmp_latents = tfutil.run(G.trainables['Input/weight'])
        G_out = G.get_output_for(tmp_latents, labels, is_training=True)
        G_out = rescale_output(G_out)
        return tf.losses.mean_squared_error(target_images, G_out)

    z_init = []
    z_recovered = []

    # load target z
    if target_latents_dir is not None:
        print('using latents:' + target_latents_dir)
        pre_latents = np.load(target_latents_dir)

    for k in range(num_total_sample):
        result_dir = os.path.join(out_dir, str(k) + '.png')

        #============sample target image
        if target_latents_dir is not None:
            latent = pre_latents[k]
        else:
            latents = misc.random_latents(1, Gs, random_state=None)
            latent = latents[0]
        z_init.append(latent)

        latents = np.zeros((num_init, 512))
        for i in range(num_init):
            latents[i] = latent
        Gt.change_input(inputs=latents)

        #================add_noise
        target_images = Gt.get_output_for(latents, labels, is_training=False)
        target_images_tf = rescale_output(target_images)
        target_images = tfutil.run(target_images_tf)

        target_images_noise = addGaussianNoise(target_images,
                                               sigma=noise_sigma)
        target_images_noise = tf.cast(target_images_noise, dtype='float32')
        target_images = target_images_noise

        #=============select random start point
        latents_2 = misc.random_latents(num_init, Gs, random_state=None)
        Gs.change_input(inputs=latents_2)

        #==============define loss&optimizer
        regularizer = tf.abs(tf.norm(latents_2) - np.sqrt(512))
        loss = G_loss(G=Gs, target_images=target_images)  # + regularizer
        # init_var = OrderedDict([('Input/weight',Gs.trainables['Input/weight'])])
        # decayed_lr = tf.train.exponential_decay(0.1,500, 50, 0.5, staircase=True)
        G_opt = tfutil.Optimizer(name='latent_recovery', learning_rate=0.01)
        G_opt.register_gradients(loss, Gs.trainables)
        G_train_op = G_opt.apply_updates()

        #===========recovery==========
        EPOCH = 500
        losses = []
        losses.append(tfutil.run(loss))
        for i in range(EPOCH):
            G_opt.reset_optimizer_state()
            tfutil.run([G_train_op])

        ########
        learned_latent = tfutil.run(Gs.trainables['Input/weight'])
        result_images = Gs.run(learned_latent,
                               labels,
                               minibatch_size=config.num_gpus * 256,
                               num_gpus=config.num_gpus,
                               out_mul=127.5,
                               out_add=127.5,
                               out_shrink=image_shrink,
                               out_dtype=np.float32)

        sample_losses = []
        tmp_latents = tfutil.run(Gs.trainables['Input/weight'])
        G_out = Gs.get_output_for(tmp_latents, labels, is_training=True)
        G_out = rescale_output(G_out)
        for i in range(num_init):
            loss = tf.losses.mean_squared_error(target_images[i], G_out[i])
            sample_losses.append(tfutil.run(loss))

        #========save best optimized image
        plt.subplot(1, 2, 1)
        plt.imshow(tfutil.run(target_images)[0].transpose(1, 2, 0) / 255.0)
        plt.subplot(1, 2, 2)
        plt.imshow(result_images[np.argmin(sample_losses)].transpose(1, 2, 0) /
                   255.0)
        plt.savefig(result_dir)

        #========store optimized z
        z_recovered.append(tmp_latents)

        #=========save losses
        #         loss=min(sample_losses)

        with open(out_dir + "/losses.txt", "a") as f:
            for loss in sample_losses:
                f.write(str(loss) + ' ')
            f.write('\n')
        np.save(out_dir + '/z_init', np.array(z_init))
        np.save(out_dir + '/z_re', np.array(z_recovered))
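
A hedged call sketch for this MSE-based variant (the paths are placeholders; passing target_latents_dir=None makes the function sample fresh target latents itself).

# Hypothetical invocation; adjust paths and noise_sigma for your experiment.
recovery(name='mse_recovery',
         pkl_path1='models/source.pkl',
         pkl_path2='models/target.pkl',
         out_dir='recovery_mse',
         target_latents_dir=None,
         num_init=20,
         num_total_sample=100,
         noise_sigma=0)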
Code Example #11
def classify(model_path, testing_data_path):

    labels_1 = [
        'CelebA_real_data', 'ProGAN_generated_data', 'SNGAN_generated_data',
        'CramerGAN_generated_data', 'MMDGAN_generated_data'
    ]
    labels_2 = [
        'CelebA_real_data', 'ProGAN_seed_0_generated_data',
        'ProGAN_seed_1_generated_data', 'ProGAN_seed_2_generated_data',
        'ProGAN_seed_3_generated_data', 'ProGAN_seed_4_generated_data',
        'ProGAN_seed_5_generated_data', 'ProGAN_seed_6_generated_data',
        'ProGAN_seed_7_generated_data', 'ProGAN_seed_8_generated_data',
        'ProGAN_seed_9_generated_data'
    ]

    print('Loading network...')
    C_im = misc.load_network_pkl(model_path)

    if testing_data_path.endswith('.png') or testing_data_path.endswith(
            '.jpg'):
        im = np.array(PIL.Image.open(testing_data_path)).astype(
            np.float32) / 255.0
        if len(im.shape) < 3:
            im = np.dstack([im, im, im])
        if im.shape[2] == 4:
            im = im[:, :, :3]
        if im.shape[0] != 128:
            im = skimage.transform.resize(im, (128, 128))
        im = np.transpose(misc.adjust_dynamic_range(im, [0, 1], [-1, 1]),
                          axes=[2, 0, 1])
        im = np.reshape(im, [1] + list(im.shape))
        logits = C_im.run(im,
                          minibatch_size=1,
                          num_gpus=1,
                          out_dtype=np.float32)
        idx = np.argmax(np.squeeze(logits))
        if logits.shape[1] == len(labels_1):
            labels = list(labels_1)
        elif logits.shape[1] == len(labels_2):
            labels = list(labels_2)
        print('The input image is predicted as being sampled from %s' %
              labels[idx])

    elif os.path.isdir(testing_data_path):
        count_dict = None
        name_list = sorted(os.listdir(testing_data_path))
        length = len(name_list)
        for (count0, name) in enumerate(name_list):
            im = np.array(PIL.Image.open('%s/%s' %
                                         (testing_data_path, name))).astype(
                                             np.float32) / 255.0
            if len(im.shape) < 3:
                im = np.dstack([im, im, im])
            if im.shape[2] == 4:
                im = im[:, :, :3]
            if im.shape[0] != 128:
                im = skimage.transform.resize(im, (128, 128))
            im = np.transpose(misc.adjust_dynamic_range(im, [0, 1], [-1, 1]),
                              axes=[2, 0, 1])
            im = np.reshape(im, [1] + list(im.shape))
            logits = C_im.run(im,
                              minibatch_size=1,
                              num_gpus=1,
                              out_dtype=np.float32)
            idx = np.argmax(np.squeeze(logits))
            if logits.shape[1] == len(labels_1):
                labels = list(labels_1)
            elif logits.shape[1] == len(labels_2):
                labels = list(labels_2)
            if count_dict is None:
                count_dict = {}
                for label in labels:
                    count_dict[label] = 0
            count_dict[labels[idx]] += 1
            print(
                'Classifying %d/%d images: %s: predicted as being sampled from %s'
                % (count0, length, name, labels[idx]))
        for label in labels:
            print(
                'The percentage of images sampled from %s is %d/%d = %.2f%%' %
                (label, count_dict[label], length,
                 float(count_dict[label]) / float(length) * 100.0))
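
A usage sketch (both paths are placeholders; the loaded classifier must emit either 5 or 11 logits to match the label lists above).

# Classify a single image:
classify('models/classifier.pkl', 'samples/ProGAN_00000000.png')
# Or tally predictions over a whole directory:
classify('models/classifier.pkl', 'samples/')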