# Broadcast the test intensity list so there is one intensity per tested
# attribute — assumes args.test_ints is a list being replicated; TODO confirm.
args.test_ints = args.test_ints * len(args.test_att_names)

# Create a TF1 session and enter it so it becomes the default session.
sess = tl.session()
sess.__enter__()  # make default


# ==============================================================================
# =                                 data and model                             =
# ==============================================================================

# data
# Evaluation pipeline: no augmentation (training=False), keep partial final
# batch (drop_remainder=False), deterministic order (shuffle=False), single
# pass (repeat=None).
test_dataset, len_test_dataset = data.make_celeba_dataset(
    args.img_dir, args.test_label_path, args.att_names, args.n_samples,
    load_size=args.load_size, crop_size=args.crop_size,
    training=False, drop_remainder=False, shuffle=False, repeat=None)
# NOTE(review): make_one_shot_iterator is TF1-only API — fine here since the
# file uses tl.session(), but it will not port to TF2 unchanged.
test_iter = test_dataset.make_one_shot_iterator()


# ==============================================================================
# =                                    graph                                   =
# ==============================================================================

# NOTE(review): definition truncated in this chunk — the body of
# sample_graph() continues beyond the visible source.
def sample_graph():
    # ======================================
    # =                graph               =
    # ======================================
# Command-line setup: register the experiment-name flag and parse all args.
py.arg('--experiment_name', default='default')
args = py.args()

# Output directory for this experiment's artifacts.
output_dir = py.join('output', args.experiment_name)
py.mkdir(output_dir)

# Persist the parsed settings so the run is reproducible.
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# Number of attributes being modeled.
n_atts = len(args.att_names)

# Training pipeline (augmentation on via training=True).
# NOTE(review): shuffle=False on a *training* dataset looks unintentional —
# confirm whether shuffling was meant to be enabled here.
train_dataset, len_train_dataset = data.make_celeba_dataset(
    args.img_dir, args.train_label_path, args.att_names, args.batch_size,
    load_size=args.load_size, crop_size=args.crop_size, training=True,
    shuffle=False, repeat=None)
print(len_train_dataset)  # leftover debug output — consider removing or using logging
print(train_dataset)      # leftover debug output
train_iter = train_dataset.make_one_shot_iterator()

# Create one session and enter it so it becomes the default for later runs.
sess = tl.session()
sess.__enter__()  # make default

# Graph construction only — no session is required to build these ops, so the
# original redundant `with tf.Session() as sess:` (which opened a SECOND
# session, shadowed the default entered above, and closed itself on exit)
# has been removed.
xa, a = train_iter.get_next()
b = tf.random_shuffle(a)  # shuffle labels across the batch to form target labels
b_ = b * 2 - 1            # presumably maps {0, 1} labels to {-1, +1} — confirm label encoding
# Persist the parsed run configuration next to the outputs.
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                                data and model                              =
# ==============================================================================

# Select the dataset pipeline and the matching generator/discriminator depth.
if args.dataset in ('cifar10', 'fashion_mnist', 'mnist'):
    # 32x32 images -> 3 up/down-sampling stages.
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
    n_G_upsamplings = 3
    n_D_downsamplings = 3
elif args.dataset == 'celeba':
    # 64x64 images -> 4 up/down-sampling stages.
    img_paths = py.glob('data/img_align_celeba', '*.jpg')
    dataset, shape, len_dataset = data.make_celeba_dataset(img_paths, args.batch_size)
    n_G_upsamplings = 4
    n_D_downsamplings = 4
elif args.dataset == 'anime':
    # 64x64 images -> 4 up/down-sampling stages.
    img_paths = py.glob('data/faces', '*.jpg')
    dataset, shape, len_dataset = data.make_anime_dataset(img_paths, args.batch_size)
    n_G_upsamplings = 4
    n_D_downsamplings = 4
elif args.dataset == 'custom':
    # ======================================
    # =               custom               =
    # ======================================
    # Placeholders to be filled in for a user-supplied dataset.
    img_paths = ...  # image paths of custom dataset
    dataset, shape, len_dataset = data.make_custom_dataset(img_paths, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = ...  # 3 for 32x32 and 4 for 64x64
    # ======================================
# ============================================================================== # = data = # ============================================================================== # setup dataset if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']: # 32x32 data_loader, shape = data.make_32x32_dataset(args.dataset, args.batch_size, pin_memory=use_gpu) n_G_upsamplings = n_D_downsamplings = 3 elif args.dataset == 'celeba': # 64x64 img_paths = py.glob('data/img_align_celeba', '*.jpg') data_loader, shape = data.make_celeba_dataset(img_paths, args.batch_size, pin_memory=use_gpu) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'anime': # 64x64 img_paths = py.glob('data/faces', '*.jpg') data_loader, shape = data.make_anime_dataset(img_paths, args.batch_size, pin_memory=use_gpu) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'custom': # ====================================== # = custom = # ====================================== img_paths = ... # image paths of custom dataset