Example #1
def get_dataset(dataset_name, batch_size):
    if dataset_name == 'mnist':
        # dataset
        pylib.mkdir('./data/mnist')
        dataset = tl.Mnist(data_dir='./data/mnist', batch_size=batch_size)

        # get next func
        def get_next():
            return dataset.get_next()['img']

        dataset.get_next_ = get_next

        # shape
        img_shape = [28, 28, 1]

    elif dataset_name == 'celeba':
        # dataset
        def _map_func(img):
            crop_size = 108
            re_size = 64
            img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2, (178 - crop_size) // 2, crop_size, crop_size)
            img = tf.image.resize_images(img, [re_size, re_size], method=tf.image.ResizeMethod.BICUBIC)
            img = tf.clip_by_value(img, 0, 255) / 127.5 - 1
            return img

        paths = glob.glob('./data/celeba/img_align_celeba/*.jpg')
        dataset = tl.DiskImageData(img_paths=paths, batch_size=batch_size, map_func=_map_func)

        # get next func
        dataset.get_next_ = dataset.get_next

        # shape
        img_shape = [64, 64, 3]

    return dataset, img_shape
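The point of patching get_next_ onto both dataset objects is that callers can stay dataset-agnostic. A minimal sketch of such a call site, assuming the session-based tl API used above (the call site itself is illustrative, not from the source):

dataset, img_shape = get_dataset('mnist', batch_size=64)
img_batch = dataset.get_next_()  # the same call works for 'mnist' and 'celeba'
# img_batch: a batch tensor of shape [64] + img_shape, i.e. [64, 28, 28, 1] here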
Example #2
def sample_graph():

    # ======================================
    # =               graph                =
    # ======================================

    # placeholders & inputs
    zs = [
        tl.truncated_normal([args.n_samples, z_dim],
                            minval=-args.truncation_threshold,
                            maxval=args.truncation_threshold)
        for z_dim in args.z_dims
    ]
    eps = tl.truncated_normal([args.n_samples, args.eps_dim],
                              minval=-args.truncation_threshold,
                              maxval=args.truncation_threshold)

    # generate
    x_f = G_test(zs, eps, training=False)

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_training/sample' % (args.experiment_name)
    py.mkdir(save_dir)

    def run(epoch, iter):
        x_f_opt = sess.run(x_f)
        sample = im.immerge(x_f_opt, n_rows=int(args.n_samples**0.5))
        im.imwrite(sample, '%s/Epoch-%d_Iter-%d.jpg' % (save_dir, epoch, iter))

    return run
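tl.truncated_normal with minval/maxval appears to restrict every latent coordinate to [-truncation_threshold, truncation_threshold]. Outside a graph, the same distribution can be drawn with SciPy, as Example #18 below does; a self-contained sketch:

import scipy.stats

t = 0.5  # illustrative truncation threshold
z = scipy.stats.truncnorm.rvs(-t, t, size=[16, 128])  # every value falls in [-t, t]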
Example #3
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, args_.generator_pb)):
        # model
        Genc, Gdec, _ = module.get_model(args.model, n_atts, weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, args_.generator_pb), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = '%s/output/%s/samples_testing_slide/%s_%s_%s_%s' % \
        (args.flask_path, args.experiment_name, args.test_att_name, '{:g}'.format(args.test_int_min), '{:g}'.format(args.test_int_max), '{:g}'.format(args.test_int_step))
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt = np.copy(a_ipt)
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}

            x_opt_list = [xa_ipt]
            for test_int in np.arange(args.test_int_min, args.test_int_max + 1e-5, args.test_int_step):
                b__ipt[:, args.att_names.index(args.test_att_name)] = test_int
                x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
                x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] * sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
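The transpose/reshape pair near the end of run() tiles one row of result images per input image. A small self-contained check of that shape arithmetic, with dummy sizes chosen only for illustration:

import numpy as np

N, B, H, W, C = 3, 2, 4, 4, 3  # images per row, batch, height, width, channels
x_opt_list = [np.zeros((B, H, W, C)) for _ in range(N)]
sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))  # -> [B, H, N, W, C]
sample = np.reshape(sample, (B, -1, N * W, C))      # -> [B, H, N*W, C]
assert sample.shape == (2, 4, 12, 3)                # N images laid side by side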
Example #4
def get_dataset_models(dataset_name):
    if dataset_name == 'mnist':
        import models
        pylib.mkdir('./data/mnist')
        Dataset = partial(tl.Mnist, data_dir='./data/mnist', repeat=1)
        return Dataset, {'D': models.D, 'G': models.G}

    elif dataset_name == 'celeba':
        import models_64x64
        raise NotImplementedError
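A hypothetical call site for this factory; the batch_size keyword is an assumption, based on tl.Mnist accepting it in Example #1:

Dataset, models = get_dataset_models('mnist')
dataset = Dataset(batch_size=64)  # data_dir and repeat were already fixed by partial
D, G = models['D'], models['G']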
Example #5
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    # placeholders & inputs
    val_next = val_iter.get_next()
    xa = tf.placeholder(tf.float32,
                        shape=[None, args.crop_size, args.crop_size, 3])
    b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

    # sample graph
    x = Gdec(Genc(xa, training=False), b_, training=False)

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_training' % args.experiment_name
    py.mkdir(save_dir)

    def run(epoch, iter):
        # data for sampling
        xa_ipt, a_ipt = sess.run(val_next)
        b_ipt_list = [a_ipt]  # the first is for reconstruction
        for i in range(n_atts):
            tmp = np.array(a_ipt, copy=True)
            tmp[:, i] = 1 - tmp[:, i]  # inverse attribute
            tmp = data.check_attribute_conflict(tmp, args.att_names[i],
                                                args.att_names)
            b_ipt_list.append(tmp)

        x_opt_list = [xa_ipt]
        for i, b_ipt in enumerate(b_ipt_list):
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}
            if i > 0:  # i == 0 is for reconstruction
                b__ipt[..., i - 1] = b__ipt[..., i - 1] * args.test_int
            x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
            x_opt_list.append(x_opt)
        sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
        sample = np.reshape(
            sample, (-1, sample.shape[2] * sample.shape[3], sample.shape[4]))
        im.imwrite(sample, '%s/Epoch-%d_Iter-%d.jpg' % (save_dir, epoch, iter))

    return run
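Two label conventions meet in run(): attributes are stored as {0, 1}, but the decoder is conditioned on {-1, +1} codes. The arithmetic in isolation (values illustrative):

import numpy as np

a = np.array([[1, 0, 1]], dtype=np.float32)  # stored attribute vector
b = np.array(a, copy=True)
b[:, 1] = 1 - b[:, 1]                        # flip attribute 1 -> [[1., 1., 1.]]
b_ = (b * 2 - 1).astype(np.float32)          # map {0, 1} -> {-1, +1}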
Example #6
def get_dataset(dataset_name):
    if dataset_name == 'mnist':
        # dataset
        pylib.mkdir('./data/mnist')
        Dataset = partial(tl.Mnist, data_dir='./data/mnist', repeat=1)

        # shape
        img_shape = [28, 28, 1]

        # index func
        def get_imgs(batch):
            return batch['img']

        return Dataset, img_shape, get_imgs

    elif dataset_name == 'celeba':
        # dataset
        def _map_func(img):
            crop_size = 108
            re_size = 64
            img = tf.image.crop_to_bounding_box(img, (218 - crop_size) // 2,
                                                (178 - crop_size) // 2,
                                                crop_size, crop_size)
            img = tf.image.resize_images(img, [re_size, re_size],
                                         method=tf.image.ResizeMethod.BICUBIC)
            img = tf.clip_by_value(img, 0, 255) / 127.5 - 1
            return img

        paths = glob.glob('./data/celeba/img_align_celeba/*.jpg')
        Dataset = partial(tl.DiskImageData,
                          img_paths=paths,
                          repeat=1,
                          map_func=_map_func)

        # shape
        img_shape = [64, 64, 3]

        # index func
        def get_imgs(batch):
            return batch

        return Dataset, img_shape, get_imgs
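For reference, aligned CelebA images are 218x178 pixels, so the centered 108x108 crop in _map_func starts at row (218 - 108) // 2 = 55 and column (178 - 108) // 2 = 35 before the bicubic resize to 64x64.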
Example #7
@tf.function
def sample_A2B(A):
    A2B = G_A2B(A, training=False)
    A2B2A = G_B2A(A2B, training=False)
    return A2B, A2B2A


@tf.function
def sample_B2A(B):
    B2A = G_B2A(B, training=False)
    B2A2B = G_A2B(B2A, training=False)
    return B2A, B2A2B


# run
save_dir = py.join(args.experiment_dir, 'samples_testing', 'A2B')
py.mkdir(save_dir)
i = 0
for A in A_dataset_test:
    A2B, A2B2A = sample_A2B(A)
    for A_i, A2B_i, A2B2A_i in zip(A, A2B, A2B2A):
        img = np.concatenate(
            [A_i.numpy(), A2B_i.numpy(),
             A2B2A_i.numpy()], axis=1)
        im.imwrite(img, py.join(save_dir, py.name_ext(A_img_paths_test[i])))
        i += 1

save_dir = py.join(args.experiment_dir, 'samples_testing', 'B2A')
py.mkdir(save_dir)
i = 0
for B in B_dataset_test:
    B2A, B2A2B = sample_B2A(B)
    # mirrors the A2B loop above
    for B_i, B2A_i, B2A2B_i in zip(B, B2A, B2A2B):
        img = np.concatenate(
            [B_i.numpy(), B2A_i.numpy(),
             B2A2B_i.numpy()], axis=1)
        im.imwrite(img, py.join(save_dir, py.name_ext(B_img_paths_test[i])))
        i += 1
Example #8
py.arg('--gradient_penalty_sample_mode', choices=['line', 'real', 'fake', 'dragan'], default='line')
py.arg('--d_gradient_penalty_weight', type=float, default=10.0)
py.arg('--d_attribute_loss_weight', type=float, default=1.0)
py.arg('--g_attribute_loss_weight', type=float, default=10.0)
py.arg('--g_reconstruction_loss_weight', type=float, default=100.0)
py.arg('--weight_decay', type=float, default=0.0)

py.arg('--n_samples', type=int, default=12)
py.arg('--test_int', type=float, default=2.0)

py.arg('--experiment_name', default='default')
args = py.args()

# output_dir
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# others
n_atts = len(args.att_names)


train_dataset, len_train_dataset = data.make_celeba_dataset(args.img_dir, args.train_label_path, args.att_names, args.batch_size,
                                                            load_size=args.load_size, crop_size=args.crop_size,
                                                            training=True, shuffle=False, repeat=None)
print(len_train_dataset)
print(train_dataset)

train_iter = train_dataset.make_one_shot_iterator()
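py.arg and py.args are assumed here to be thin wrappers that collect arguments on a module-level argparse parser and then parse them. An equivalent plain-argparse sketch of a few of the calls above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--n_samples', type=int, default=12)
parser.add_argument('--test_int', type=float, default=2.0)
parser.add_argument('--experiment_name', default='default')
args = parser.parse_args([])  # parse an empty argv just for illustration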
Example #9
import imageio
import pylib as py

# ==============================================================================
# =                                   param                                    =
# ==============================================================================

py.arg('--save_path', default='pics/custom_gan.gif')
py.arg('--img_dir', default='output/_gan_dragan/samples_training')
py.arg('--max_frames', type=int, default=0)
args = py.args()

py.mkdir(py.directory(args.save_path))

# ==============================================================================
# =                                  make gif                                  =
# ==============================================================================
# modified from https://www.tensorflow.org/alpha/tutorials/generative/dcgan

with imageio.get_writer(args.save_path, mode='I', fps=8) as writer:
    filenames = sorted(py.glob(args.img_dir, '*.jpg'))
    if args.max_frames:
        step = len(filenames) // args.max_frames
    else:
        step = 1
    last = -1
    for i, filename in enumerate(filenames[::step]):
        frame = 2 * (i**0.3)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        # the tail of the loop follows the cited DCGAN tutorial
        image = imageio.imread(filename)
        writer.append_data(image)
Example #10
# ==============================================================================
# =                                    train                                   =
# ==============================================================================

# session
sess = tl.session()

# saver
saver = tf.train.Saver(max_to_keep=1)

# summary writer
summary_writer = tf.summary.FileWriter('./output/%s/summaries' % experiment_name, sess.graph)

# initialization
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
try:
    tl.load_checkpoint(ckpt_dir, sess)
except Exception:  # no checkpoint yet; initialize variables from scratch
    sess.run(tf.global_variables_initializer())

# train
try:
    z_ipt_sample = np.random.normal(size=[100, z_dim])

    it = -1
    it_per_epoch = len(dataset) // batch_size
    for ep in range(epoch):
        dataset.reset()
        for batch in dataset:
            it += 1
Example #11
    def verification(self, path_list, labels, name_list):
        atts = ['Bags_Under_Eyes']
        att_val = [2.0]
        img_size = 256
        n_slide = 10
        test_slide = False
        thres_int = 0.5

        sess_1 = tf.Session()
        te_data = data.MyCeleba(img_size,
                                1,
                                path_list,
                                labels,
                                part='test',
                                sess=sess_1,
                                crop=False)
        for idx, batch in enumerate(te_data):
            print(idx)
            xa_sample_ipt = batch[0]
            a_sample_ipt = batch[1]
            print(a_sample_ipt)
            b_sample_ipt_list = [
                a_sample_ipt.copy()
                for _ in range(n_slide if test_slide else 1)
            ]
            for a in atts:
                i = self.att_default.index(a)
                b_sample_ipt_list[-1][:, i] = 1 - b_sample_ipt_list[-1][:, i]
                b_sample_ipt_list[-1] = data.Celeba.check_attribute_conflict(
                    b_sample_ipt_list[-1], self.att_default[i],
                    self.att_default)
            x_sample_opt_list = [
                xa_sample_ipt,
                np.full((1, img_size, img_size // 10, 3), -1.0)
            ]
            raw_a_sample_ipt = a_sample_ipt.copy()
            raw_a_sample_ipt = (raw_a_sample_ipt * 2 - 1) * thres_int
            for i, b_sample_ipt in enumerate(b_sample_ipt_list):
                _b_sample_ipt = (b_sample_ipt * 2 - 1) * thres_int
                if not test_slide:
                    if atts:  # i must be 0
                        for t_att, t_int in zip(atts, att_val):
                            j = atts.index(t_att)
                            _b_sample_ipt[..., j] = _b_sample_ipt[..., j] * t_int
                    if i > 0:  # i == 0 is for reconstruction
                        _b_sample_ipt[..., i - 1] = _b_sample_ipt[..., i - 1] * test_int
                x_sample_opt_list.append(
                    self.sess_GAN.run(self.x_sample,
                                      feed_dict={
                                          self.xa_sample: xa_sample_ipt,
                                          self._b_sample: _b_sample_ipt,
                                          self.raw_b_sample: raw_a_sample_ipt
                                      }))
            sample = np.concatenate(x_sample_opt_list, 2)

            save_folder = 'sample_testing_multi/' + atts[0]
            save_dir = './output/%s/%s' % (256, save_folder)
            print(save_dir)
            pylib.mkdir(save_dir)
            mp.imsave(
                '%s/%s_' % (save_dir, name_list[idx]) + str(att_val[0]) +
                '.png', (sample.squeeze(0) + 1.0) / 2.0)

            print('%s/%s_' % (save_dir, name_list[idx]) + str(att_val[0]) +
                  '.png' + ' is done!')

        sess_1.close()
Example #12
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # number of d updates per g update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', '%s_BN%d_DPG%d' % (args.experiment_name, args.batch_size, args.n_d))
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3

elif args.dataset == 'celeba':  # 64x64
Example #13
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
torch.manual_seed(0)

if args.dataset in ['cifar10', 'fashion_mnist', 'mnist', 'imagenet']:  # 32x32
    output_channels = 3
    n_G_upsamplings = n_D_downsamplings = 3

for experiment in experiment_names:
    output_dir = py.join('output_new', 'output', experiment)

    G = module.ConvGenerator(args.z_dim,
                             output_channels,
                             n_upsamplings=n_G_upsamplings).to(device)

    # load checkpoint if exists
    ckpt_dir = py.join(output_dir, 'checkpoints', args.checkpoint_name)
    out_dir = py.join(output_dir, args.output_dir)
    py.mkdir(ckpt_dir)
    py.mkdir(out_dir)
    ckpt = torchlib.load_checkpoint(ckpt_dir)
    G.load_state_dict(ckpt['G'])

    for i in range(args.num_samples):
        z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
        x_fake = G(z).detach()
        x_fake = np.transpose(x_fake.data.cpu().numpy(), (0, 2, 3, 1))
        img = im.immerge(x_fake, n_rows=1).squeeze()
        im.imwrite(img, py.join(out_dir, 'img-%d.jpg' % i))
        print(py.join(out_dir, 'img-%d.jpg' % i))
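One detail worth flagging: unlike the sample() helper in Example #24, this loop never switches G to eval mode or disables autograd, so BatchNorm layers keep using batch statistics and .detach() is needed. A slightly safer sampling pattern with the same names (shapes assumed from the script above):

G.eval()               # freeze BatchNorm/Dropout behavior
with torch.no_grad():  # no autograd bookkeeping during sampling
    z = torch.randn(args.batch_size, args.z_dim, 1, 1, device=device)
    x_fake = G(z)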
Example #14
dec_layers = args.dec_layers
dis_layers = args.dis_layers
# training
epoch = args.epoch
batch_size = args.batch_size
lr_base = args.lr
n_d = args.n_d
b_distribution = args.b_distribution
thres_int = args.thres_int
test_int = args.test_int
n_sample = args.n_sample
# others
use_cropped_img = args.use_cropped_img
experiment_name = args.experiment_name

pylib.mkdir('./output/%s' % experiment_name)
with open('./output/%s/setting.txt' % experiment_name, 'w') as f:
    f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))


# ==============================================================================
# =                                   graphs                                   =
# ==============================================================================

# data
sess = tl.session()
tr_data = data.Celeba('./data', atts, img_size, batch_size, part='train', sess=sess, crop=not use_cropped_img)
val_data = data.Celeba('./data', atts, img_size, n_sample, part='val', shuffle=False, sess=sess, crop=not use_cropped_img)

# models
Genc = partial(models.Genc, dim=enc_dim, n_layers=enc_layers)
Example #15
            np.full((1, img_size, img_size // 10, 3), -1.0)
        ]
        for i, b_sample_ipt in enumerate(b_sample_ipt_list):
            _b_sample_ipt = (b_sample_ipt * 2 - 1) * thres_int
            if i > 0:  # i == 0 is for reconstruction
                _b_sample_ipt[..., i - 1] = (
                    _b_sample_ipt[..., i - 1] * test_int / thres_int)
            x_sample_opt_list.append(
                sess.run(x_sample,
                         feed_dict={
                             xa_sample: xa_sample_ipt,
                             _b_sample: _b_sample_ipt
                         }))
        sample = np.concatenate(x_sample_opt_list, 2)
        # print(x_sample)

        save_dir = 'test/test_results'
        pylib.mkdir(save_dir)
        im.imwrite(sample.squeeze(0),
                   '%s/%s' % (save_dir, te_data.name_list[idx]))

        print('%s done!' % (te_data.name_list[idx]))
        os.remove('test/' + te_data.name_list[idx][:-4] + '_' +
                  te_data.name_list[idx][-4:])

except Exception:
    traceback.print_exc()
finally:
    sess.close()
Example #16
py.arg(
    '--dataset',
    default='fashion_mnist',
    choices=['cifar10', 'fashion_mnist', 'mnist', 'celeba', 'anime', 'custom'])
py.arg('--out_dir', required=True)
py.arg('--num_samples_per_class', type=int, required=True)
py.arg('--batch_size', type=int, default=1)
py.arg('--num', type=int, default=-1)
py.arg('--output_dir', type=str, default='generated_imgs')

args = py.args()

use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

py.mkdir(args.out_dir)

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

img_paths = 'data/imagenet_small/train'
data_loader, shape = data.make_custom_dataset(img_paths,
                                              1,
                                              resize=32,
                                              pin_memory=use_gpu)
num_samples = args.num_samples_per_class
out_path = py.join(args.out_dir, args.output_dir)
os.makedirs(out_path, exist_ok=True)
Example #17
# session
sess = tl.session()

# initialize iterator
sess.run(train_init_op,
         feed_dict={
             x_data: target_data,
             y_data: target_train_labels
         })

# saver
saver = tf.train.Saver(max_to_keep=1)

# initialization
ckpt_dir = './output/%s/checkpoints' % experiment_name
pylib.mkdir(ckpt_dir)
sess.run(tf.global_variables_initializer())

# train
overall_it = 0
try:
    for ep in range(n_epochs):
        for it in range(iters_per_epoch):
            overall_it += 1
            # train classifier
            _ = sess.run(step, feed_dict={is_training: True})
            # display
            if (it + 1) % 10 == 0:
                batch_acc = sess.run(accuracy, feed_dict={is_training: False})
                print(
                    "Epoch: (%3d/%5d) iteration: (%5d/%5d) lr: %.6f train batch accuracy: %.3f"
Example #18
def traversal_graph():

    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        G_test = functools.partial(
            module.G(scope='G_test'),
            n_channels=args.n_channels,
            use_gram_schmidt=args.g_loss_weight_orth_loss == 0,
            training=False)

        # placeholders & inputs
        zs = [
            tf.placeholder(dtype=tf.float32, shape=[args.n_traversal, z_dim])
            for z_dim in args.z_dims
        ]
        eps = tf.placeholder(dtype=tf.float32,
                             shape=[args.n_traversal, args.eps_dim])

        # generate
        x_f = G_test(zs, eps, training=False)

        L = tl.tensors_filter(G_test.func.variables, 'L')
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        zs = [
            sess.graph.get_tensor_by_name('generator/z_%d:0' % i)
            for i in range(len(args.z_dims))
        ]
        eps = sess.graph.get_tensor_by_name('generator/eps:0')

        # sample graph
        x_f = sess.graph.get_tensor_by_name('generator/x_f:0')

        L = [
            sess.graph.get_tensor_by_name('generator/L_%d:0' % i)
            for i in range(len(args.z_dims))
        ]

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_testing/traversal/all_dims/traversal_%d_%.2f' % (
        args.experiment_name, args.n_traversal_point,
        args.truncation_threshold)
    py.mkdir(save_dir)

    def run():
        zs_ipt_fixed = [
            scipy.stats.truncnorm.rvs(-args.truncation_threshold,
                                      args.truncation_threshold,
                                      size=[args.n_traversal, z_dim])
            for z_dim in args.z_dims
        ]
        eps_ipt = scipy.stats.truncnorm.rvs(
            -args.truncation_threshold,
            args.truncation_threshold,
            size=[args.n_traversal, args.eps_dim])

        left = -4.5
        right = 4.5
        for layer_idx in range(len(args.z_dims)):
            for eigen_idx in range(args.z_dims[layer_idx]):
                L_opt = sess.run(L)
                l = layer_idx
                j = eigen_idx
                i = np.argsort(np.abs(L_opt[l]))[::-1][j]

                x_f_opts = []
                vals = np.linspace(left, right, args.n_traversal_point)
                for v in vals:
                    zs_ipt = copy.deepcopy(zs_ipt_fixed)
                    zs_ipt[l][:, i] = v
                    feed_dict = {z: z_ipt for z, z_ipt in zip(zs, zs_ipt)}
                    feed_dict.update({eps: eps_ipt})
                    x_f_opt = sess.run(x_f, feed_dict=feed_dict)
                    x_f_opts.append(x_f_opt)

                sample = np.concatenate(x_f_opts, axis=2)
                for ii in range(args.n_traversal):
                    im.imwrite(
                        sample[ii], '%s/%04d_%d-%d-%.3f-%d.jpg' %
                        (save_dir, ii, l, j, np.abs(L_opt[l][i]), i))

    return run
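The only non-obvious indexing above is the eigenvalue ordering: eigen_idx ranks latent dimensions by decreasing |eigenvalue| and i recovers the raw dimension index. In isolation:

import numpy as np

L_layer = np.array([0.1, -2.0, 0.7])       # illustrative eigenvalues of one layer
order = np.argsort(np.abs(L_layer))[::-1]  # -> [1, 2, 0], largest |value| first
i = order[0]                               # raw index of the dominant dimension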
Example #19
mode = args.mode
epoch = args.epoch
batch_size = args.batch_size
lr_base = args.lr
n_d = args.n_d
b_distribution = args.b_distribution
thres_int = args.thres_int
test_int = args.test_int
n_sample = args.n_sample
# others
use_cropped_img = args.use_cropped_img
experiment_name = args.experiment_name

experiment_dir = args.experiment_dir

pylib.mkdir('./' + experiment_dir + '/%s' % experiment_name)
with open('./' + experiment_dir + '/%s/setting.txt' % experiment_name,
          'w') as f:
    f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))

# ==============================================================================
# =                                   graphs                                   =
# ==============================================================================

# data

# allow GPU memory to grow on demand instead of pre-allocating it all (added by Qing)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
Example #20
    def __init__(
            self,
            epochs=200,
            epoch_decay=100,
            pool_size=50,
            output_dir='output',
            datasets_dir="datasets",
            dataset="drawing",
            image_ext="png",
            crop_size=256,
            load_size=286,
            batch_size=0,
            adversarial_loss_mode="lsgan",  # ['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan']
            lr=0.0002,
            gradient_penalty_mode='none',  # ['none', 'dragan', 'wgan-gp']
            gradient_penalty_weight=10.0,
            cycle_loss_weight=0.0,
            identity_loss_weight=0.0,
            beta_1=0.5,
            color_depth=1,
            progrssive=False):
        logging.config.fileConfig(fname='log.conf')
        self.logger = logging.getLogger('dev')

        if batch_size == 0:
            batch_size = 1  # later figure out what to do
        epoch_decay = min(epoch_decay, epochs // 2)

        self.output_dataset_dir = py.join(output_dir, dataset)
        py.mkdir(self.output_dataset_dir)
        py.args_to_yaml(
            py.join(self.output_dataset_dir, 'settings.yml'),
            Namespace(
                epochs=epochs,
                epoch_decay=epoch_decay,
                pool_size=pool_size,
                output_dir=output_dir,
                datasets_dir=datasets_dir,
                dataset=dataset,
                image_ext=image_ext,
                crop_size=crop_size,
                load_size=load_size,
                batch_size=batch_size,
                adversarial_loss_mode=adversarial_loss_mode,  # ['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan']
                lr=lr,
                gradient_penalty_mode=gradient_penalty_mode,  # ['none', 'dragan', 'wgan-gp']
                gradient_penalty_weight=gradient_penalty_weight,
                cycle_loss_weight=cycle_loss_weight,
                identity_loss_weight=identity_loss_weight,
                beta_1=beta_1,
                color_depth=color_depth,
                progressive=progrssive))
        self.sample_dir = py.join(self.output_dataset_dir, 'samples_training')
        py.mkdir(self.sample_dir)

        self.epochs = epochs
        self.epoch_decay = epoch_decay
        self.pool_size = pool_size
        self.gradient_penalty_mode = gradient_penalty_mode
        self.gradient_penalty_weight = gradient_penalty_weight
        self.cycle_loss_weight = cycle_loss_weight
        self.identity_loss_weight = identity_loss_weight
        self.color_depth = color_depth
        self.adversarial_loss_mode = adversarial_loss_mode
        self.batch_size = batch_size
        self.beta_1 = beta_1
        self.color_depth = color_depth
        self.dataset = dataset
        self.datasets_dir = datasets_dir
        self.image_ext = image_ext
        self.progrssive = progrssive
        self.lr = lr

        self.crop_size = crop_size
        self.load_size = load_size

        self.A_img_paths = py.glob(py.join(datasets_dir, dataset, 'trainA'),
                                   '*.{}'.format(image_ext))
        self.B_img_paths = py.glob(py.join(datasets_dir, dataset, 'trainB'),
                                   '*.{}'.format(image_ext))

        # summary
        self.train_summary_writer = tf.summary.create_file_writer(
            py.join(self.output_dataset_dir, 'summaries', 'train'))
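Note that the constructor parameter is consistently spelled progrssive (no second "e") while the key written to settings.yml is progressive, so code reading the YAML back should not assume the two names match.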
Example #21
                           max_to_keep=1)
try:  # restore checkpoint including the epoch counter
    checkpoint.restore().assert_existing_objects_matched()
except Exception as e:
    print(e)

# summary
train_summary_writer = tf.summary.create_file_writer(
    py.join(output_dir, 'summaries', 'train'))

# sample
test_iter = iter(A_B_dataset_test)
sample_dir = py.join(output_dir, 'samples_training')
py.mkdir(sample_dir)

# main loop
with train_summary_writer.as_default():
    for ep in tqdm.trange(args.epochs, desc='Epoch Loop'):
        if ep < ep_cnt:
            continue

        # update epoch counter
        ep_cnt.assign_add(1)

        # train for an epoch
        for A, B in tqdm.tqdm(A_B_dataset,
                              desc='Inner Epoch Loop',
                              total=len_dataset,
                              position=0):
Example #22
       default='gan',
       choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode',
       default='none',
       choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(
        args.dataset, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3

elif args.dataset == 'celeba':  # 64x64
Example #23
py.arg('--beta_1', type=float, default=0.5)
py.arg('--n_d', type=int, default=1)  # number of d updates per g update
py.arg('--z_dim', type=int, default=128)
py.arg('--adversarial_loss_mode', default='gan', choices=['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan'])
py.arg('--gradient_penalty_mode', default='none', choices=['none', 'dragan', 'wgan-gp'])
py.arg('--gradient_penalty_weight', type=float, default=10.0)
py.arg('--experiment_name', default='none')
args = py.args()

# output_dir
if args.experiment_name == 'none':
    args.experiment_name = '%s_%s' % (args.dataset, args.adversarial_loss_mode)
    if args.gradient_penalty_mode != 'none':
        args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3

elif args.dataset == 'celeba':  # 64x64
Example #24
            'gp': gp.data.cpu().numpy()}


@torch.no_grad()
def sample(z):
    G.eval()
    return G(z)

# ==============================================================================
# =                                    run                                     =
# ==============================================================================


# load checkpoint if exists
ckpt_dir = py.join(output_dir, 'checkpoints')
py.mkdir(ckpt_dir)
try:
    ckpt = torchlib.load_checkpoint(py.join(ckpt_dir, 'Last.ckpt'))
    ep, it_d, it_g = ckpt['ep'], ckpt['it_d'], ckpt['it_g']
    D.load_state_dict(ckpt['D'])
    G.load_state_dict(ckpt['G'])
    D_optimizer.load_state_dict(ckpt['D_optimizer'])
    G_optimizer.load_state_dict(ckpt['G_optimizer'])
    print('loading was successful. Starting at epoch: ', ep)
except Exception as e:
    print(e)
    ep, it_d, it_g = 0, 0, 0

# sample
sample_dir = py.join(output_dir, 'samples_training')
py.mkdir(sample_dir)
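A hypothetical call of the sample() helper above, using the latent shape that the ConvGenerator in Example #13 expects:

z = torch.randn(64, args.z_dim, 1, 1, device=device)
x = sample(z)  # runs G in eval mode with gradients disabled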
Example #25
def train_CycleGAN():

    import logGPU_RAM

    # summary
    train_summary_writer = tf.summary.create_file_writer(
        py.join(output_dir, 'summaries', 'train'))
    logGPU_RAM.init_gpu_writers(py.join(output_dir, 'summaries', 'GPUs'))

    # sample
    test_iter = iter(A_B_dataset_test)
    sample_dir = py.join(output_dir, 'samples_training')
    py.mkdir(sample_dir)

    test_sample = next(test_iter)

    # timing
    import time
    start_time = time.time()

    # main loop
    with train_summary_writer.as_default():
        for ep in tqdm.trange(args.epochs, desc='Epoch Loop'):
            if ep < ep_cnt:
                continue

            # update epoch counter
            ep_cnt.assign_add(1)

            # train for an epoch
            for A, B in tqdm.tqdm(A_B_dataset,
                                  desc='Inner Epoch Loop',
                                  total=len_dataset):
                G_loss_dict, D_loss_dict = train_step(A, B)

                iteration = G_optimizer.iterations.numpy()

                # summary
                tl.summary(G_loss_dict, step=iteration, name='G_losses')
                tl.summary(D_loss_dict, step=iteration, name='D_losses')
                tl.summary(
                    {'learning rate': G_lr_scheduler.current_learning_rate},
                    step=iteration,
                    name='learning rate')
                tl.summary(
                    {'second since start': np.array(time.time() - start_time)},
                    step=iteration,
                    name='second_Per_Iteration')
                logGPU_RAM.log_gpu_memory_to_tensorboard()

                # sample
                if iteration % 1000 == 0:
                    A, B = next(test_iter)
                    A2B, B2A, A2B2A, B2A2B = sample(A, B)
                    img = im.immerge(np.concatenate(
                        [A, A2B, A2B2A, B, B2A, B2A2B], axis=0),
                                     n_rows=2)
                    im.imwrite(
                        img,
                        py.join(sample_dir,
                                'iter-%09d-sample-test-random.jpg' %
                                iteration))
                if iteration % 100 == 0:
                    A, B = test_sample
                    A2B, B2A, A2B2A, B2A2B = sample(A, B)
                    img = im.immerge(np.concatenate(
                        [A, A2B, A2B2A, B, B2A, B2A2B], axis=0),
                                     n_rows=2)
                    im.imwrite(
                        img,
                        py.join(
                            sample_dir,
                            'iter-%09d-sample-test-specific.jpg' % iteration))
            # save checkpoint
            checkpoint.save(ep)
Example #26
"""
This script produces the TSNE plots appearing in Section 4.2 of the manuscript.
"""
import os
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import pylib
import scatterHist as sh

experiment_name = [x[0] for x in os.walk('./output')]
experiment_name = experiment_name[1].split('/')[2]
calibrated_data_dir = './output/%s/calibrated_data' % experiment_name
plots_dir = './output/%s/tsne_plots' % experiment_name
pylib.mkdir(plots_dir)

# load data
source_train_data = np.loadtxt(calibrated_data_dir + '/source_train_data.csv',
                               delimiter=',')
target_train_data = np.loadtxt(calibrated_data_dir + '/target_train_data.csv',
                               delimiter=',')
calibrated_source_train_data = np.loadtxt(calibrated_data_dir +
                                          '/calibrated_source_train_data.csv',
                                          delimiter=',')
reconstructed_target_train_data = np.loadtxt(
    calibrated_data_dir + '/reconstructed_target_train_data.csv',
    delimiter=',')
n_s = source_train_data.shape[0]
n_t = target_train_data.shape[0]
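The snippet stops before TSNE is actually applied; a minimal sketch of the presumable embedding step over the loaded arrays (an assumption, since the rest of the script is not shown):

import numpy as np
from sklearn.manifold import TSNE

combined = np.concatenate([source_train_data, target_train_data], axis=0)
embedded = TSNE(n_components=2).fit_transform(combined)  # -> [n_s + n_t, 2]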
Example #27
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        Genc, Gdec, _ = module.get_model(args.model,
                                         n_atts,
                                         weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32,
                            shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = '%s/output/%s/samples_testing_%s' % (
        args.flask_path, args.experiment_name, '{:g}'.format(args.test_int))
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt_list = [a_ipt]  # the first is for reconstruction
            for i in range(n_atts):
                tmp = np.array(a_ipt, copy=True)
                tmp[:, i] = 1 - tmp[:, i]  # inverse attribute
                tmp = data.check_attribute_conflict(tmp, args.att_names[i],
                                                    args.att_names)
                b_ipt_list.append(tmp)

            x_opt_list = [xa_ipt]
            for i, b_ipt in enumerate(b_ipt_list):
                b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}
                if i > 0:  # i == 0 is for reconstruction
                    b__ipt[..., i - 1] = b__ipt[..., i - 1] * args.test_int
                x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
                x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] *
                                         sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
Example #28
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        G = functools.partial(module.PAGANG(), dim=args.dim, weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32, shape=[None, args.crop_size, args.crop_size, 3])
        a_ = tf.placeholder(tf.float32, shape=[None, n_atts])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x, e, ms, _ = G(xa, b_ - a_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        a_ = sess.graph.get_tensor_by_name('generator/a_:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')
        e = sess.graph.get_tensor_by_name('generator/e:0')
        ms = sess.graph.get_operation_by_name('generator/ms').outputs

    # ======================================
    # =            run function            =
    # ======================================

    if args.with_mask:
        save_dir = './output/%s/samples_testing_multi_with_mask' % args.experiment_name
    else:
        save_dir = './output/%s/samples_testing_multi' % args.experiment_name
    tmp = ''
    for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
        tmp += '_%s_%s' % (test_att_name, '{:g}'.format(test_int))
    save_dir = py.join(save_dir, tmp[1:])
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_iter.get_next())
            b_ipt = np.copy(a_ipt)
            for test_att_name in args.test_att_names:
                i = args.att_names.index(test_att_name)
                b_ipt[..., i] = 1 - b_ipt[..., i]
                b_ipt = data.check_attribute_conflict(b_ipt, test_att_name, args.att_names)

            a__ipt = a_ipt * 2 - 1
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}
            for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
                i = args.att_names.index(test_att_name)
                b__ipt[..., i] = b__ipt[..., i] * test_int

            x_opt_list = [xa_ipt]
            e_opt_list = [np.full_like(xa_ipt, -1.0)]
            ms_opt_list = []
            x_opt, e_opt, ms_opt = sess.run([x, e, ms], feed_dict={xa: xa_ipt, a_: a__ipt, b_: b__ipt})
            x_opt_list.append(x_opt)
            e_opt_list.append(e_opt)
            ms_opt_list.append(ms_opt)

            if args.with_mask:
                # resize all masks to the same size
                for ms_opt in ms_opt_list:  # attribute axis
                    for i, m_opt in enumerate(ms_opt):  # mask level axis
                        m_opt_resized = []
                        for m_j_opt in m_opt:  # batch axis
                            m_opt_resized.append(im.imresize(m_j_opt * 2 - 1, (args.crop_size, args.crop_size)))
                        ms_opt[i] = np.concatenate([np.array(m_opt_resized)] * 3, axis=-1)
                ms_opt_list = [np.full_like(ms_opt_list[0], -1.0)] + ms_opt_list
                ms_opt_list = list(np.transpose(ms_opt_list, (1, 0, 2, 3, 4, 5)))[::-1]
                sample_m = np.transpose([x_opt_list, e_opt_list] + ms_opt_list, (2, 0, 3, 1, 4, 5))
            else:
                sample_m = np.transpose([x_opt_list], (2, 0, 3, 1, 4, 5))
            sample_m = np.reshape(sample_m, (sample_m.shape[0], -1, sample_m.shape[3] * sample_m.shape[4], sample_m.shape[5]))

            for s in sample_m:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
Example #29
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    test_next = test_iter.get_next()

    if not os.path.exists(py.join(output_dir, 'generator.pb')):
        # model
        Genc, Gdec, _ = module.get_model(args.model,
                                         n_atts,
                                         weight_decay=args.weight_decay)

        # placeholders & inputs
        xa = tf.placeholder(tf.float32,
                            shape=[None, args.crop_size, args.crop_size, 3])
        b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

        # sample graph
        x = Gdec(Genc(xa, training=False), b_, training=False)
    else:
        # load frozen model
        with tf.gfile.GFile(py.join(output_dir, 'generator.pb'), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='generator')

        # placeholders & inputs
        xa = sess.graph.get_tensor_by_name('generator/xa:0')
        b_ = sess.graph.get_tensor_by_name('generator/b_:0')

        # sample graph
        x = sess.graph.get_tensor_by_name('generator/xb:0')

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_testing_multi' % args.experiment_name
    tmp = ''
    for test_att_name, test_int in zip(args.test_att_names, args.test_ints):
        tmp += '_%s_%s' % (test_att_name, '{:g}'.format(test_int))
    save_dir = py.join(save_dir, tmp[1:])
    py.mkdir(save_dir)

    def run():
        cnt = 0
        for _ in tqdm.trange(len_test_dataset):
            # data for sampling
            xa_ipt, a_ipt = sess.run(test_next)
            b_ipt = np.copy(a_ipt)
            for test_att_name in args.test_att_names:
                i = args.att_names.index(test_att_name)
                b_ipt[..., i] = 1 - b_ipt[..., i]
                b_ipt = data.check_attribute_conflict(b_ipt, test_att_name,
                                                      args.att_names)

            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}
            for test_att_name, test_int in zip(args.test_att_names,
                                               args.test_ints):
                i = args.att_names.index(test_att_name)
                b__ipt[..., i] = b__ipt[..., i] * test_int

            x_opt_list = [xa_ipt]
            x_opt = sess.run(x, feed_dict={xa: xa_ipt, b_: b__ipt})
            x_opt_list.append(x_opt)
            sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
            sample = np.reshape(sample, (sample.shape[0], -1, sample.shape[2] *
                                         sample.shape[3], sample.shape[4]))

            for s in sample:
                cnt += 1
                im.imwrite(s, '%s/%d.jpg' % (save_dir, cnt))

    return run
Example #30
def sample_graph():
    # ======================================
    # =               graph                =
    # ======================================

    # placeholders & inputs
    xa = tf.placeholder(tf.float32,
                        shape=[None, args.crop_size, args.crop_size, 3])
    a_ = tf.placeholder(tf.float32, shape=[None, n_atts])
    b_ = tf.placeholder(tf.float32, shape=[None, n_atts])

    # sample graph
    x, e, ms, _ = G(xa, b_ - a_, training=False)

    # ======================================
    # =            run function            =
    # ======================================

    save_dir = './output/%s/samples_training' % args.experiment_name
    py.mkdir(save_dir)

    def run(epoch, iter):
        # data for sampling
        xa_ipt, a_ipt = sess.run(val_iter.get_next())
        b_ipt_list = [a_ipt]  # the first is for reconstruction
        for i in range(n_atts):
            tmp = np.array(a_ipt, copy=True)
            tmp[:, i] = 1 - tmp[:, i]  # inverse attribute
            tmp = data.check_attribute_conflict(tmp, args.att_names[i],
                                                args.att_names)
            b_ipt_list.append(tmp)

        x_opt_list = [xa_ipt]
        e_opt_list = [np.full_like(xa_ipt, -1.0)]
        ms_opt_list = []
        a__ipt = a_ipt * 2 - 1
        for i, b_ipt in enumerate(b_ipt_list):
            b__ipt = (b_ipt * 2 - 1).astype(np.float32)  # map {0, 1} labels to {-1, +1}
            if i > 0:  # i == 0 is for reconstruction
                b__ipt[..., i - 1] = b__ipt[..., i - 1] * args.test_int
            x_opt, e_opt, ms_opt = sess.run([x, e, ms],
                                            feed_dict={
                                                xa: xa_ipt,
                                                a_: a__ipt,
                                                b_: b__ipt
                                            })
            x_opt_list.append(x_opt)
            e_opt_list.append(e_opt)
            ms_opt_list.append(ms_opt)

        # save sample
        sample = np.transpose(x_opt_list, (1, 2, 0, 3, 4))
        sample = np.reshape(
            sample, (-1, sample.shape[2] * sample.shape[3], sample.shape[4]))

        # resize all masks to the same size
        for ms_opt in ms_opt_list:  # attribute axis
            for i, m_opt in enumerate(ms_opt):  # mask level axis
                m_opt_resized = []
                for m_j_opt in m_opt:  # batch axis
                    m_opt_resized.append(
                        im.imresize(m_j_opt * 2 - 1,
                                    (args.crop_size, args.crop_size)))
                ms_opt[i] = np.concatenate([np.array(m_opt_resized)] * 3,
                                           axis=-1)
        ms_opt_list = [np.full_like(ms_opt_list[0], -1.0)] + ms_opt_list
        ms_opt_list = list(np.transpose(ms_opt_list, (1, 0, 2, 3, 4, 5)))[::-1]
        sample_m = np.transpose([x_opt_list, e_opt_list] + ms_opt_list,
                                (2, 0, 3, 1, 4, 5))
        sample_m = np.reshape(
            sample_m,
            (-1, sample_m.shape[3] * sample_m.shape[4], sample_m.shape[5]))
        im.imwrite(np.concatenate((sample, sample_m)),
                   '%s/Epoch-%d_Iter-%d.jpg' % (save_dir, epoch, iter))

    return run