Example #1
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, args_.generator_pb)):
        # model
        Genc, Gdec, _ = module.get_model(args.model, n_atts, weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, args_.generator_pb), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = '%s/output/%s/samples_testing_slide/%s_%s_%s_%s' % \
        (args.flask_path, args.experiment_name, args.test_att_name, '{:g}'.format(args.test_int_min), '{:g}'.format(args.test_int_max), '{:g}'.format(args.test_int_step))
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt = np.copy(a_ipt)
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map attribute labels from {0, 1} to {-1, 1}

            x_opt_list = [xa_ipt]
            for test_int in np.arange(args.test_int_min, args.test_int_max + 1e-5, args.test_int_step):
                b__ipt[:, args.att_names.index(args.test_att_name)] = test_int
                x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
                x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] * sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
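
Example #1 builds the graph once (or loads a frozen .pb) and returns a run closure. A minimal usage sketch, assuming the tl.session() pattern from Examples #24/#25 and a hypothetical tl.load_checkpoint restore helper (neither is part of this snippet):

sess = tl.session()
sess.__enter__()  # make default

run = sample_graph()  # build or load the generator graph
tl.load_checkpoint(py.join(output_dir, 'checkpoints'), sess)  # hypothetical: restore trained weights
run()  # writes one sliding-attribute grid per test image into save_dir
sess.close()
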
Example #2
    def construct_model(self, crop_size, load_size):
        self.A_B_dataset, len_dataset = data.make_zip_dataset(
            self.A_img_paths,
            self.B_img_paths,
            self.batch_size,
            load_size,
            crop_size,
            training=True,
            repeat=False,
            is_gray_scale=(self.color_depth == 1))
        self.len_dataset = len_dataset

        self.A2B_pool = data.ItemPool(self.pool_size)
        self.B2A_pool = data.ItemPool(self.pool_size)
        A_img_paths_test = py.glob(
            py.join(self.datasets_dir, self.dataset, 'testA'),
            '*.{}'.format(self.image_ext))
        B_img_paths_test = py.glob(
            py.join(self.datasets_dir, self.dataset, 'testB'),
            '*.{}'.format(self.image_ext))
        A_B_dataset_test, _ = data.make_zip_dataset(
            A_img_paths_test,
            B_img_paths_test,
            self.batch_size,
            load_size,
            crop_size,
            training=False,
            repeat=True,
            is_gray_scale=(self.color_depth == 1))
        self.test_iter = iter(A_B_dataset_test)
        self.G_A2B = module.ResnetGenerator(input_shape=(crop_size, crop_size,
                                                         self.color_depth),
                                            output_channels=self.color_depth)
        self.G_B2A = module.ResnetGenerator(input_shape=(crop_size, crop_size,
                                                         self.color_depth),
                                            output_channels=self.color_depth)
        self.D_A = module.ConvDiscriminator(input_shape=(crop_size, crop_size,
                                                         self.color_depth))
        self.D_B = module.ConvDiscriminator(input_shape=(crop_size, crop_size,
                                                         self.color_depth))
        self.d_loss_fn, self.g_loss_fn = gan.get_adversarial_losses_fn(
            self.adversarial_loss_mode)
        self.cycle_loss_fn = tf.losses.MeanAbsoluteError()
        self.identity_loss_fn = tf.losses.MeanAbsoluteError()
        self.G_lr_scheduler = module.LinearDecay(
            self.lr, self.epochs * self.len_dataset,
            self.epoch_decay * self.len_dataset)
        self.D_lr_scheduler = module.LinearDecay(
            self.lr, self.epochs * self.len_dataset,
            self.epoch_decay * self.len_dataset)
        self.G_optimizer = keras.optimizers.Adam(
            learning_rate=self.G_lr_scheduler, beta_1=self.beta_1)
        self.D_optimizer = keras.optimizers.Adam(
            learning_rate=self.D_lr_scheduler, beta_1=self.beta_1)
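
module.LinearDecay is constructed above with (lr, total_steps, step_decay) but not defined in this snippet. Below is a minimal sketch consistent with that call signature and with the current_learning_rate attribute read in Example #13; it is an assumption, not the repository's actual code:

import tensorflow as tf
from tensorflow import keras

class LinearDecay(keras.optimizers.schedules.LearningRateSchedule):
    # Constant learning rate for `step_decay` steps, then linear decay to 0
    # at `total_steps`.
    def __init__(self, initial_learning_rate, total_steps, step_decay):
        super().__init__()
        self._initial_learning_rate = initial_learning_rate
        self._total_steps = total_steps
        self._step_decay = step_decay
        self.current_learning_rate = tf.Variable(initial_learning_rate,
                                                 trainable=False,
                                                 dtype=tf.float32)

    def __call__(self, step):
        step = tf.cast(step, tf.float32)
        lr = tf.cond(
            step >= self._step_decay,
            lambda: self._initial_learning_rate * (
                1 - (step - self._step_decay) /
                (self._total_steps - self._step_decay)),
            lambda: tf.constant(self._initial_learning_rate, tf.float32))
        self.current_learning_rate.assign(lr)
        return lr
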
Example #3
def make_celeba_dataset(img_dir,
                        label_path,
                        att_names,
                        batch_size,
                        load_size=286,
                        crop_size=256,
                        training=True,
                        drop_remainder=True,
                        shuffle=True,
                        repeat=1):
    img_names = np.genfromtxt(label_path, dtype=str, usecols=0)
    img_paths = np.array(
        [py.join(img_dir, img_name) for img_name in img_names])
    labels = np.genfromtxt(label_path, dtype=int, usecols=range(1, 13))
    labels = labels[:, np.array([ATT_ID[att_name] for att_name in att_names])]

    if shuffle:
        idx = np.random.permutation(len(img_paths))
        img_paths = img_paths[idx]
        labels = labels[idx]

    if training:

        def map_fn_(img, label):
            img = tf.image.resize(img, [load_size, load_size])
            # img = tl.random_rotate(img, 5)
            img = tf.image.random_flip_left_right(img)
            img = tf.image.random_crop(img, [crop_size, crop_size, 3])
            # img = tl.color_jitter(img, 25, 0.2, 0.2, 0.1)
            # img = tl.random_grayscale(img, p=0.3)
            img = tf.clip_by_value(img, 0, 255) / 127.5 - 1
            label = (label + 1) // 2
            return img, label
    else:

        def map_fn_(img, label):
            img = tf.image.resize(img, [load_size, load_size])
            img = tl.center_crop(img, size=crop_size)
            img = tf.clip_by_value(img, 0, 255) / 127.5 - 1
            label = (label + 1) // 2
            return img, label

    dataset = tl.disk_image_batch_dataset(img_paths,
                                          batch_size,
                                          labels=labels,
                                          drop_remainder=drop_remainder,
                                          map_fn=map_fn_,
                                          shuffle=shuffle,
                                          repeat=repeat)

    if drop_remainder:
        len_dataset = len(img_paths) // batch_size
    else:
        len_dataset = int(np.ceil(len(img_paths) / batch_size))

    return dataset, len_dataset
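
The (label + 1) // 2 step in both map functions converts CelebA's {-1, 1} annotation convention into {0, 1} targets; a quick illustrative check:

import numpy as np

label = np.array([-1, 1, 1, -1])
print((label + 1) // 2)  # [0 1 1 0]
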
Example #4
 def set_checkpoints(self):
     self.ep_cnt = tf.Variable(initial_value=0,
                               trainable=False,
                               dtype=tf.int64)
     # checkpoint
     self.checkpoint = tl.Checkpoint(dict(G_A2B=self.G_A2B,
                                          G_B2A=self.G_B2A,
                                          D_A=self.D_A,
                                          D_B=self.D_B,
                                          G_optimizer=self.G_optimizer,
                                          D_optimizer=self.D_optimizer,
                                          ep_cnt=self.ep_cnt),
                                     py.join(self.output_dataset_dir,
                                             'checkpoints'),
                                     max_to_keep=5)
     try:  # restore checkpoint including the epoch counter
         self.checkpoint.restore().assert_existing_objects_matched()
     except Exception as e:
         self.logger.warning(e)
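
tl.Checkpoint is used above but defined elsewhere. Judging from the call (a dict of trackables, a directory, max_to_keep), a plausible minimal equivalent pairs tf.train.Checkpoint with tf.train.CheckpointManager; a sketch under that assumption:

import tensorflow as tf

class Checkpoint:
    # One object wrapping tf.train.Checkpoint plus its CheckpointManager.
    def __init__(self, checkpoint_kwargs, directory, max_to_keep=5):
        self.checkpoint = tf.train.Checkpoint(**checkpoint_kwargs)
        self.manager = tf.train.CheckpointManager(self.checkpoint, directory,
                                                  max_to_keep=max_to_keep)

    def restore(self, save_path=None):
        # Defaults to the latest checkpoint; with no checkpoint on disk,
        # assert_existing_objects_matched() raises, which the try/except
        # above relies on.
        save_path = self.manager.latest_checkpoint if save_path is None else save_path
        return self.checkpoint.restore(save_path)

    def save(self, checkpoint_number=None):
        return self.manager.save(checkpoint_number=checkpoint_number)
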
Example #5
 def snapshot(self, A, B, image_file_name, debug=False):
     A2B, B2A, A2B2A, B2A2B = self.sample(A, B)
     img = im.immerge(np.concatenate([A, A2B, A2B2A, B, B2A, B2A2B],
                                     axis=0),
                      n_rows=2)
     im.imwrite(
         img,
         py.join(self.sample_dir,
                 image_file_name % self.G_optimizer.iterations.numpy()))
     buffer = io.BytesIO()
     if self.color_depth == 1:
         pyplot.imshow(img.reshape(img.shape[0], img.shape[1]), cmap='gray')
     else:
         pyplot.imshow(img)
     pyplot.savefig(buffer, format='png')
     if debug:
         buffer.seek(0)
         pyplot.imread(buffer)
         pyplot.show()
     return buffer
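
im.immerge merges a batch of images into one grid image; a simplified sketch of the behavior assumed here (inferred from the n_rows=2 call above, not imlib's exact code):

import numpy as np

def immerge_sketch(images, n_rows):
    # images: (N, H, W, C) in [-1, 1]; returns an (n_rows*H, n_cols*W, C) grid.
    n, h, w, c = images.shape
    n_cols = int(np.ceil(n / n_rows))
    grid = np.full((n_rows * h, n_cols * w, c), -1.0, dtype=images.dtype)
    for k, img in enumerate(images):
        r, col = divmod(k, n_cols)
        grid[r * h:(r + 1) * h, col * w:(col + 1) * w] = img
    return grid
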
Example #6
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
py.arg('--grayscale', type=bool, default=False)
py.arg('--triplet_margin', type=float, default=1.0)
py.arg('--evaluate_every', type=int, default=500)
args = py.args()

params = vars(args)
#neptune.create_experiment(name=args.experiment_name,params=params)
#neptune.append_tag('cycleGAN')
# output_dir
output_dir = os.path.join(
    args.outdir, args.experiment_name)  #py.join('output', args.outdir)
os.makedirs(output_dir, exist_ok=True)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================
A = pd.read_csv(args.train_datasetA)
B = pd.read_csv(args.train_datasetB)
print(B)
A_img_paths = list(
    A['file_name']
)  #py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'), '*.jpg')
A_labels = list(A['label'])
print(type(A_img_paths[0]))
B_img_paths = list(
    B['file_name']
)  #py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'), '*.jpg')
Example #7
py.arg('--gradient_penalty_mode', choices=['none', '1-gp', '0-gp', 'lp'], default='1-gp')
py.arg('--gradient_penalty_sample_mode', choices=['line', 'real', 'fake', 'dragan'], default='line')
py.arg('--d_gradient_penalty_weight', type=float, default=10.0)
py.arg('--d_attribute_loss_weight', type=float, default=1.0)
py.arg('--g_attribute_loss_weight', type=float, default=10.0)
py.arg('--g_reconstruction_loss_weight', type=float, default=100.0)
py.arg('--weight_decay', type=float, default=0.0)

py.arg('--n_samples', type=int, default=12)
py.arg('--test_int', type=float, default=2.0)

py.arg('--experiment_name', default='default')
args = py.args()

# output_dir
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# others
n_atts = len(args.att_names)


train_dataset, len_train_dataset = data.make_celeba_dataset(args.img_dir, args.train_label_path, args.att_names, args.batch_size,
                                                            load_size=args.load_size, crop_size=args.crop_size,
                                                            training=True, shuffle=False, repeat=None)
print(len_train_dataset)
print(train_dataset)
Example #8
py.arg('--gradient_penalty_sample_mode',
       default='line',
       choices=['line', 'real', 'fake', 'dragan'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
py.arg('--gradient_penalty_d_norm',
       default='layer_norm',
       choices=['instance_norm', 'layer_norm'])  # normalization used in D when a gradient penalty is applied
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s_%s' % (args.gradient_penalty_mode,
                                            args.gradient_penalty_sample_mode)
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(args.dataset,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
Example #9
py.arg('--crop_size', type=int, default=256)  # then crop to this size
py.arg('--batch_size', type=int, default=1)
py.arg('--epochs', type=int, default=200)
py.arg('--epoch_decay', type=int, default=100)  # epoch to start decaying learning rate
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode', default='lsgan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=10.0)
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = py.join('output', args.dataset)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'), '*.jpg')
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'), '*.jpg')
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths, B_img_paths, args.batch_size, args.load_size, args.crop_size, training=True, repeat=False)

A2B_pool = data.ItemPool(args.pool_size)
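
data.ItemPool is the fake-sample history buffer used by CycleGAN (Shrivastava et al.'s trick): while the pool is filling, items pass straight through; afterwards each item is either passed through or, half of the time, swapped with a randomly stored one. A minimal sketch under that assumption:

import random

import numpy as np

class ItemPool:
    def __init__(self, pool_size=50):
        self.pool_size = pool_size
        self.items = []

    def __call__(self, in_items):
        out_items = []
        for item in in_items:
            item = np.array(item)
            if len(self.items) < self.pool_size:  # fill the pool first
                self.items.append(item)
                out_items.append(item)
            elif random.random() > 0.5:  # swap with a stored sample
                i = random.randrange(self.pool_size)
                out_items.append(self.items[i])
                self.items[i] = item
            else:  # pass through unchanged
                out_items.append(item)
        return np.stack(out_items)
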
Example #10
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        Genc, Gdec, _ = module.get_model(args.model,
                                         n_atts,
                                         weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32,
                            shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = '%s/output/%s/samples_testing_%s' % (
        args.flask_path, args.experiment_name, '{:g}'.format(args.test_int))
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt_list = [a_ipt]  # the first is for reconstruction
            for i in range(n_atts):
                tmp = np.array(a_ipt, copy=True)
                tmp[:, i] = 1 - tmp[:, i]  # inverse attribute
                tmp = data.check_attribute_conflict(tmp, args.att_names[i],
                                                    args.att_names)
                b_ipt_list.append(tmp)

            x_opt_list = [xa_ipt]
            for i, b_ipt in enumerate(b_ipt_list):
                b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map attribute labels from {0, 1} to {-1, 1}
                if i > 0:  # i == 0 is for reconstruction
                    b__ipt[..., i - 1] = b__ipt[..., i - 1] * args.test_int
                x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
                x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] *
                                         sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
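
data.check_attribute_conflict resolves mutually exclusive CelebA attributes after a flip; a simplified sketch of the idea (assumed rules in the spirit of AttGAN, not the repository's exact table):

def check_attribute_conflict_sketch(att_batch, att_name, att_names):
    hair_colors = ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']

    def _set(att, value, name):
        if name in att_names:
            att[att_names.index(name)] = value

    idx = att_names.index(att_name)
    for att in att_batch:
        if att[idx] == 1:
            if att_name == 'Bald':
                _set(att, 0, 'Bangs')  # a bald face cannot have bangs
            elif att_name == 'Bangs':
                _set(att, 0, 'Bald')
            elif att_name in hair_colors:
                for other in hair_colors:  # keep hair color one-hot
                    if other != att_name:
                        _set(att, 0, other)
    return att_batch
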
Example #11
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # number of D updates per G update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', '%s_BN%d_DPG%d' % (args.experiment_name, args.batch_size, args.n_d))
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3
Example #12
py.arg('--lr', type=float, default=0.0002)
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode',
       default='lsgan',
       choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode',
       default='none',
       choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=10.0)
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = py.join('output', args.dataset)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'),
                      '*.jpg')
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'),
                      '*.jpg')
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 training=True,
                                                 repeat=False)
Example #13
def train_CycleGAN():

    import logGPU_RAM

    # summary
    train_summary_writer = tf.summary.create_file_writer(
        py.join(output_dir, 'summaries', 'train'))
    logGPU_RAM.init_gpu_writers(py.join(output_dir, 'summaries', 'GPUs'))

    # sample
    test_iter = iter(A_B_dataset_test)
    sample_dir = py.join(output_dir, 'samples_training')
    py.mkdir(sample_dir)

    test_sample = next(test_iter)

    # timing
    import time
    start_time = time.time()

    # main loop
    with train_summary_writer.as_default():
        for ep in tqdm.trange(args.epochs, desc='Epoch Loop'):
            if ep < ep_cnt:
                continue

            # update epoch counter
            ep_cnt.assign_add(1)

            # train for an epoch
            for A, B in tqdm.tqdm(A_B_dataset,
                                  desc='Inner Epoch Loop',
                                  total=len_dataset):
                G_loss_dict, D_loss_dict = train_step(A, B)

                iteration = G_optimizer.iterations.numpy()

                # # summary
                tl.summary(G_loss_dict, step=iteration, name='G_losses')
                tl.summary(D_loss_dict, step=iteration, name='D_losses')
                tl.summary(
                    {'learning rate': G_lr_scheduler.current_learning_rate},
                    step=iteration,
                    name='learning rate')
                tl.summary(
                    {'second since start': np.array(time.time() - start_time)},
                    step=iteration,
                    name='second_Per_Iteration')
                logGPU_RAM.log_gpu_memory_to_tensorboard()

                # sample
                if iteration % 1000 == 0:
                    A, B = next(test_iter)
                    A2B, B2A, A2B2A, B2A2B = sample(A, B)
                    img = im.immerge(np.concatenate(
                        [A, A2B, A2B2A, B, B2A, B2A2B], axis=0),
                                     n_rows=2)
                    im.imwrite(
                        img,
                        py.join(sample_dir,
                                'iter-%09d-sample-test-random.jpg' %
                                iteration))
                if iteration % 100 == 0:
                    A, B = test_sample
                    A2B, B2A, A2B2A, B2A2B = sample(A, B)
                    img = im.immerge(np.concatenate(
                        [A, A2B, A2B2A, B, B2A, B2A2B], axis=0),
                                     n_rows=2)
                    im.imwrite(
                        img,
                        py.join(
                            sample_dir,
                            'iter-%09d-sample-test-specific.jpg' % iteration))
            # save checkpoint
            checkpoint.save(ep)
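
train_step is called above but defined elsewhere. A condensed sketch of a typical CycleGAN step under the losses configured in Example #2, assuming G_A2B, G_B2A, D_A, D_B, the loss functions, the pools from Example #9, the optimizers, and args are in scope, and that d_loss_fn/g_loss_fn return scalars (all assumptions):

import tensorflow as tf

@tf.function
def train_G(A, B):
    with tf.GradientTape() as t:
        A2B = G_A2B(A, training=True)
        B2A = G_B2A(B, training=True)
        A2B2A = G_B2A(A2B, training=True)
        B2A2B = G_A2B(B2A, training=True)
        g_loss = (g_loss_fn(D_B(A2B, training=True))
                  + g_loss_fn(D_A(B2A, training=True))
                  + args.cycle_loss_weight
                  * (cycle_loss_fn(A, A2B2A) + cycle_loss_fn(B, B2A2B)))
    vars_G = G_A2B.trainable_variables + G_B2A.trainable_variables
    G_optimizer.apply_gradients(zip(t.gradient(g_loss, vars_G), vars_G))
    return A2B, B2A, {'g_loss': g_loss}

@tf.function
def train_D(A, B, A2B, B2A):
    with tf.GradientTape() as t:
        d_loss = (d_loss_fn(D_A(A, training=True), D_A(B2A, training=True))
                  + d_loss_fn(D_B(B, training=True), D_B(A2B, training=True)))
    vars_D = D_A.trainable_variables + D_B.trainable_variables
    D_optimizer.apply_gradients(zip(t.gradient(d_loss, vars_D), vars_D))
    return {'d_loss': d_loss}

def train_step(A, B):
    A2B, B2A, G_loss_dict = train_G(A, B)
    A2B = tf.convert_to_tensor(A2B_pool(A2B))  # history buffer, see Example #9
    B2A = tf.convert_to_tensor(B2A_pool(B2A))
    D_loss_dict = train_D(A, B, A2B, B2A)
    return G_loss_dict, D_loss_dict
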
Example #14
    py.arg('--gradient_penalty_mode',
           default='none',
           choices=['none', 'dragan', 'wgan-gp'])
    py.arg('--gradient_penalty_weight', type=float, default=10.0)
    py.arg('--cycle_loss_weight', type=float, default=10.0)
    py.arg('--identity_loss_weight', type=float, default=0.0)
    py.arg('--pool_size', type=int,
           default=50)  # pool size to store fake samples
    py.arg('--new-run', type=bool, default=False)
    py.arg('--run-id', type=int, default=False)
    py.arg('--checkpoint-path', type=str, default=None)
    args = py.args()
else:
    from config import args

# output_dir
output_dir = py.join('output', args.dataset)
runs = glob.glob(py.join(output_dir, "*"))
if len(runs) == 0:
    run_id = 0
else:
    run_id = np.array([py.split(d)[-2] for d in runs]).astype(np.int32).max()
    if args.new_run:
        run_id += 1
    if args.run_id is not False:
        run_id = args.run_id
output_dir = py.join(output_dir, f'{run_id:04d}')

py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)
Example #15
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

py.mkdir(args.out_dir)

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

img_paths = 'data/imagenet_small/train'
data_loader, shape = data.make_custom_dataset(img_paths,
                                              1,
                                              resize=32,
                                              pin_memory=use_gpu)
num_samples = args.num_samples_per_class
out_path = py.join(args.out_dir, args.output_dir)
os.makedirs(out_path, exist_ok=True)

while num_samples > 0:
    for x_real, labels in data_loader:
        if num_samples > 0:
            x_real = np.transpose(x_real.data.cpu().numpy(), (0, 2, 3, 1))
            img = im.immerge(x_real, n_rows=1).squeeze()
            im.imwrite(
                img,
                py.join(out_path, 'img-%d-%d.jpg' % (num_samples, args.num)))
            num_samples -= 1
            print('saving ', num_samples)
Example #16
import pylib as py
import tensorflow as tf
import tf2lib as tl
import pandas as pd
import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_dir')
py.arg('--batch_size', type=int, default=1)

test_args = py.args()
args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml'))
print(args)
args.__dict__.update(test_args.__dict__)

# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data

A_test = pd.read_csv(
    '/users/irodri15/data/irodri15/Fossils/Experiments/datasets/gan_fossils_leaves_v1/fossils_train_oscar_processed.csv'
)  #'/users/irodri15/scratch/Fossils/Experiments/softmax_triplet/datasets/gan_fossils_leaves/test_gan_fossils.csv')
B_test = pd.read_csv(
    '/users/irodri15/data/irodri15/Fossils/Experiments/datasets/gan_fossils_leaves_v1/leaves_train_oscar_processed.csv'
)  #/users/irodri15/scratch/Fossils/Experiments/softmax_triplet/datasets/gan_fossils_leaves/test_gan_leaves.csv')
Example #17
def traversal_graph():

    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        G_test = functools.partial(
            module.G(scope='G_test'),
            n_channels=args.n_channels,
            use_gram_schmidt=args.g_loss_weight_orth_loss == 0,
            training=False)

        # placeholders & inputs
        zs = [
            tf.placeholder(dtype=tf.float32, shape=[args.n_traversal, z_dim])
            for z_dim in args.z_dims
        ]
        eps = tf.placeholder(dtype=tf.float32,
                             shape=[args.n_traversal, args.eps_dim])

        # generate
        x_f = G_test(zs, eps, training=False)

        L = tl.tensors_filter(G_test.func.variables, 'L')
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        zs = [
            sess.graph.get_tensor_by_name('generator/z_%d:0' % i)
            for i in range(len(args.z_dims))
        ]
        eps = sess.graph.get_tensor_by_name('generator/eps:0')

        # sample graph
        x_f = sess.graph.get_tensor_by_name('generator/x_f:0')

        L = [
            sess.graph.get_tensor_by_name('generator/L_%d:0' % i)
            for i in range(len(args.z_dims))
        ]

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_testing/traversal/all_dims/traversal_%d_%.2f' % (
        args.experiment_name, args.n_traversal_point,
        args.truncation_threshold)
    py.mkdir(save_dir)

    def run():
        zs_ipt_fixed = [
            scipy.stats.truncnorm.rvs(-args.truncation_threshold,
                                      args.truncation_threshold,
                                      size=[args.n_traversal, z_dim])
            for z_dim in args.z_dims
        ]
        eps_ipt = scipy.stats.truncnorm.rvs(
            -args.truncation_threshold,
            args.truncation_threshold,
            size=[args.n_traversal, args.eps_dim])

        left = -4.5
        right = 4.5
        for layer_idx in range(len(args.z_dims)):
            for eigen_idx in range(args.z_dims[layer_idx]):
                L_opt = sess.run(L)
                l = layer_idx
                j = eigen_idx
                i = np.argsort(np.abs(L_opt[l]))[::-1][j]

                x_f_opts = []
                vals = np.linspace(left, right, args.n_traversal_point)
                for v in vals:
                    zs_ipt = copy.deepcopy(zs_ipt_fixed)
                    zs_ipt[l][:, i] = v
                    feed_dict = {z: z_ipt for z, z_ipt in zip(zs, zs_ipt)}
                    feed_dict.update({eps: eps_ipt})
                    x_f_opt = sess.run(x_f, feed_dict=feed_dict)
                    x_f_opts.append(x_f_opt)

                sample = np.concatenate(x_f_opts, axis=2)
                for ii in range(args.n_traversal):
                    im.imwrite(
                        sample[ii], '%s/%04d_%d-%d-%.3f-%d.jpg' %
                        (save_dir, ii, l, j, np.abs(L_opt[l][i]), i))

    return run
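
scipy.stats.truncnorm.rvs(-t, t, size=...) with the default loc=0 and scale=1 samples a standard normal truncated to [-t, t], the bounds being in standard-deviation units; for example:

import scipy.stats

z = scipy.stats.truncnorm.rvs(-2.0, 2.0, size=[5, 128])
assert (z >= -2.0).all() and (z <= 2.0).all()
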
Example #18
import numpy as np
import pylib as py
import tensorflow as tf
import tf2lib as tl

import data
import module, os

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_dir')
py.arg('--batch_size', type=int, default=32)
test_args = py.args()
args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml'))
args.__dict__.update(test_args.__dict__)

# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data
# A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'), '*.jpg')
# B_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testB'), '*.jpg')
# A_dataset_test = data.make_dataset(A_img_paths_test, args.batch_size, args.load_size, args.crop_size,
#                                    training=False, drop_remainder=False, shuffle=False, repeat=1)
# B_dataset_test = data.make_dataset(B_img_paths_test, args.batch_size, args.load_size, args.crop_size,
#                                    training=False, drop_remainder=False, shuffle=False, repeat=1)

lve_imgpaths = tf.io.gfile.glob(
Example #19
import numpy as np
import pylib as py
import tensorflow as tf
import tf2lib as tl
import pandas as pd
import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_dir')
py.arg('--batch_size', type=int, default=32)
test_args = py.args()
args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml'))
print(args)
args.__dict__.update(test_args.__dict__)


# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data
A_test = pd.read_csv(args.test_datasetA)
B_test = pd.read_csv(args.test_datasetB)
A_img_paths_test = list(A_test['file_name'])
B_img_paths_test = list(B_test['file_name'])
A_test_labels = list(A_test['label'])
B_test_labels = list(B_test['label'])
Example #20
import numpy as np
import pylib as py
import tensorflow as tf
import tf2lib as tl

import data
import module

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--experiment_dir')
py.arg('--batch_size', type=int, default=32)
test_args = py.args()
args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml'))
args.__dict__.update(test_args.__dict__)

# ==============================================================================
# =                                    test                                    =
# ==============================================================================

# data
A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'),
                           '*.jpg')
B_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testB'),
                           '*.jpg')
A_dataset_test = data.make_dataset(A_img_paths_test,
                                   args.batch_size,
                                   args.load_size,
                                   args.crop_size,
                                   training=False,
                                   drop_remainder=False,
                                   shuffle=False,
                                   repeat=1)
Example #21
py.arg('--adversarial_loss_mode',
       default='lsgan',
       choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode',
       default='none',
       choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=10.0)
py.arg('--identity_loss_weight', type=float, default=0.1)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = "gdrive/My Drive/output"
#py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = glob.glob("./Images/*/*.jpg")
B_img_paths = glob.glob('./dataset_pokemon/dataset/*/*.jpg')
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 training=True,
                                                 repeat=False)
Example #22
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        G = functools.partial(module.PAGANG(), dim=args.dim, weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        a_ = tf.placeholder(tf.float32, shape=[None, n_atts])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x, e, ms, _ = G(xa, b_ - a_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        a_ = sess.graph.get_tensor_by_name('generator/a_:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')
        e = sess.graph.get_tensor_by_name('generator/e:0')
        ms = sess.graph.get_operation_by_name('generator/ms').outputs

    # ======================================
    # =            run function            =
    # ======================================

    if args.with_mask:
        save_dir = './output/%s/samples_testing_multi_with_mask' % args.experiment_name
    else:
        save_dir = './output/%s/samples_testing_multi' % args.experiment_name
    tmp = ''
    for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
        tmp += '_%s_%s' % (test_att_name, '{:g}'.format(test_int))
    save_dir = py.join(save_dir, tmp[1:])
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt = np.copy(a_ipt)
            for test_att_name in args.test_att_names:
                i = args.att_names.index(test_att_name)
                b_ipt[..., i] = 1 - b_ipt[..., i]
                b_ipt = data.check_attribute_conflict(b_ipt, test_att_name, args.att_names)

            a__ipt = a_ipt * 2 - 1
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map attribute labels from {0, 1} to {-1, 1}
            for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
                i = args.att_names.index(test_att_name)
                b__ipt[..., i] = b__ipt[..., i] * test_int

            x_opt_list = [xa_ipt]
            e_opt_list = [np.full_like(xa_ipt, -1.0)]
            ms_opt_list = []
            x_opt, e_opt, ms_opt = sess.run([x, e, ms], feed_dict={xa: xa_ipt, a_: a__ipt, b_: b__ipt})
            x_opt_list.append(x_opt)
            e_opt_list.append(e_opt)
            ms_opt_list.append(ms_opt)

            if args.with_mask:
                # resize all masks to the same size
                for ms_opt in ms_opt_list:  # attribute axis
                    for i, m_opt in enumerate(ms_opt):  # mask level axis
                        m_opt_resized = []
                        for m_j_opt in m_opt:  # batch axis
                            m_opt_resized.append(im.imresize(m_j_opt * 2 - 1, (args.crop_size, args.crop_size)))
                        ms_opt[i] = np.concatenate([np.array(m_opt_resized)] * 3, axis=-1)
                ms_opt_list = [np.full_like(ms_opt_list[0], -1.0)] + ms_opt_list
                ms_opt_list = list(np.transpose(ms_opt_list, (1, 0, 2, 3, 4, 5)))[::-1]
                sample_m = np.transpose([x_opt_list, e_opt_list] + ms_opt_list, (2, 0, 3, 1, 4, 5))
            else:
                sample_m = np.transpose([x_opt_list], (2, 0, 3, 1, 4, 5))
            sample_m = np.reshape(sample_m, (sample_m.shape[0], -1, sample_m.shape[3] * sample_m.shape[4], sample_m.shape[5]))

            for s in sample_m:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
Example #23
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples

#model parameters

py.arg('--dim', type=int, default=64)
py.arg('--n_downsamplings', type=int, default=2)
py.arg('--n_blocks', type=int, default=9)
py.arg('--norm', type=str, default='instance_norm')
py.arg('--augmentation', type=str, default='Normal')

args = py.args()

# output_dir
output_dir = py.join(
    'output', args.dataset +
    ('' if args.output_index == '' else '_' + str(args.output_index)))
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join('datasets', args.dataset, 'trainA'), '*.jpg')
B_img_paths = py.glob(py.join('datasets', args.dataset, 'trainB'), '*.jpg')
A_B_dataset, len_dataset = data.make_zip_dataset(
    A_img_paths,
    B_img_paths,
    args.batch_size,
    args.load_size,
    args.crop_size,
    training=True,
    repeat=False)
Example #24
py.arg(
    '--img_dir',
    default=
    './data/img_celeba/aligned/align_size(572,572)_move(0.250,0.000)_face_factor(0.450)_jpg/data'
)
py.arg('--test_label_path', default='./data/img_celeba/test_label.txt')
py.arg('--test_att_names',
       choices=data.ATT_ID.keys(),
       nargs='+',
       default=['Bangs', 'Mustache'])
py.arg('--test_ints', type=float, nargs='+', default=2)

py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = py.join('output', args_.experiment_name)

# save settings
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
n_atts = len(args.att_names)
if not isinstance(args.test_ints, list):
    args.test_ints = [args.test_ints] * len(args.test_att_names)
elif len(args.test_ints) == 1:
    args.test_ints = args.test_ints * len(args.test_att_names)

sess = tl.session()
sess.__enter__()  # make default
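
tl.session() is assumed to create a TF1 session that the script then enters as the default; a plausible minimal version (an assumption, not tflib's exact code):

import tensorflow as tf

def session(graph=None):
    # Session with soft placement and on-demand GPU memory growth.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    return tf.Session(graph=graph, config=config)
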
Example #25
import module

# configuration
DEBUG = True
os.environ["CUDA_PATH"] = "/usr/local/cuda"

py.arg('--flask_path', default='/var/www/html/flaskapp_unigan')
py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_int', type=float, default=2)
py.arg('--experiment_name', default='UniGAN_128')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path,
                          py.join('output', args_.experiment_name))

# save settings
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
n_atts = len(args.att_names)

sess = tl.session()
sess.__enter__()  # make default

# ==============================================================================
# =                               data and model                               =
# ==============================================================================
Example #26
py.arg('--experiment_name', default='none')
py.arg('--gradient_penalty_d_norm',
       default='layer_norm',
       choices=['instance_norm', 'layer_norm'])  # normalization used in D when a gradient penalty is applied
args = py.args()

N = 88
epsilon = 1e-8

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s_9_15' % (args.dataset,
                                           args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(args.dataset,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
Example #27
# This code randomly samples 1000 images from the dataset and outputs them to ./evaluation/[language] files.
import numpy as np
import os
from PIL import Image
import glob
import cv2
import pylib as py
import imlib as im

language = 'japanese'

train_dir = "./dataset/" + language
output_dir = "./evaluation/training"
file_list = os.listdir(train_dir)
imgs = []
for file in file_list:
    img = cv2.imread(os.path.join(train_dir, file), 0)
    imgs.append(img)
np.random.shuffle(imgs)
#print(np.shape(imgs[1]))
for i in range(0, 1000):
    cv2.imwrite(py.join(output_dir, language + '%03d.jpg' % i), imgs[i])
print(np.shape(imgs))
Example #28
File: train.py Project: liuhd073/GANs
py.arg('--lr', type=float, default=0.0002)  #default 0.0002
py.arg('--beta_1', type=float, default=0.5)
py.arg('--adversarial_loss_mode',
       default='lsgan',
       choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode',
       default='none',
       choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--cycle_loss_weight', type=float, default=10.0)
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=25)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = py.join('output5-just watching MSe etc ', args.dataset)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'),
                      '*.png')
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'),
                      '*.png')
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 training=True,
                                                 repeat=False)
Example #29
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    test_next = test_iter.get_next()

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        Genc, Gdec, _ = module.get_model(args.model,
                                         n_atts,
                                         weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32,
                            shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_testing_multi' % args.experiment_name
    tmp = ''
    for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
        tmp += '_%s_%s' % (test_att_name, '{:g}'.format(test_int))
    save_dir = py.join(save_dir, tmp[1:])
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_next)
            b_ipt = np.copy(a_ipt)
            for test_att_name in args.test_att_names:
                i = args.att_names.index(test_att_name)
                b_ipt[..., i] = 1 - b_ipt[..., i]
                b_ipt = data.check_attribute_conflict(b_ipt, test_att_name,
                                                      args.att_names)

            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map attribute labels from {0, 1} to {-1, 1}
            for test_att_name, test_int in zip(args.test_att_names,
                                               args.test_ints):
                i = args.att_names.index(test_att_name)
                b__ipt[..., i] = b__ipt[..., i] * test_int

            x_opt_list = [xa_ipt]
            x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
            x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] *
                                         sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
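
The transpose/reshape pair near the end of run() lays the input and its edited outputs side by side, one strip per batch item. A shape-only illustration with made-up sizes:

import numpy as np

x_opt_list = [np.zeros((4, 128, 128, 3))] * 3  # 3 outputs for a batch of 4
sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))  # -> (4, 128, 3, 128, 3)
sample = np.reshape(sample, (sample.shape[0], -1,
                             sample.shape[2] * sample.shape[3], sample.shape[4]))
print(sample.shape)  # (4, 128, 384, 3): three 128x128 images per row
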
Example #30
# ==============================================================================

py.arg('--flask_path', default='/var/www/html/flaskapp_unigan')
py.arg('--generator_pb', default='generator_unigan_gender_only_beta_0.5.pb')
py.arg('--img_dir', default='./data/zappos_50k/images')
py.arg('--test_label_path', default='./data/zappos_50k/test_label.txt')
py.arg('--test_att_name', choices=data.ATT_ID.keys(), default='Women')
py.arg('--test_int_min', type=float, default=-2)
py.arg('--test_int_max', type=float, default=2)
py.arg('--test_int_step', type=float, default=0.5)

py.arg('--experiment_name', default='default')
args_ = py.args()

# output_dir
output_dir = os.path.join(args_.flask_path, py.join('output', args_.experiment_name))
# output_dir = py.join('output', args_.experiment_name)

# save settings
args = py.args_from_yaml(py.join(output_dir, 'settings.yml'))
args.__dict__.update(args_.__dict__)

# others
n_atts = len(args.att_names)

sess = tl.session()
sess.__enter__()  # make default


# ==============================================================================
# =                               data and model                               =
# ==============================================================================