n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'anime': # 64x64 img_paths = py.glob('data/faces', '*.jpg') data_loader, shape = data.make_anime_dataset(img_paths, args.batch_size, pin_memory=use_gpu) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'custom': # ====================================== # = custom = # ====================================== img_paths = ... # image paths of custom dataset data_loader = data.make_custom_dataset(img_paths, args.batch_size, pin_memory=use_gpu) n_G_upsamplings = n_D_downsamplings = ... # 3 for 32x32 and 4 for 64x64 # ====================================== # = custom = # ====================================== # ============================================================================== # = model = # ============================================================================== # setup the normalization function for discriminator if args.gradient_penalty_mode == 'none': d_norm = 'batch_norm' else: # cannot use batch normalization with gradient penalty d_norm = args.gradient_penalty_d_norm
elif args.dataset == 'celeba': # 64x64 img_paths = py.glob('data/img_align_celeba', '*.jpg') dataset, shape, len_dataset = data.make_celeba_dataset(img_paths, args.batch_size) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'anime': # 64x64 img_paths = py.glob('data/faces', '*.jpg') dataset, shape, len_dataset = data.make_anime_dataset(img_paths, args.batch_size) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'custom': # ====================================== # = custom = # ====================================== img_paths = ... # image paths of custom dataset dataset, shape, len_dataset = data.make_custom_dataset(img_paths, args.batch_size) n_G_upsamplings = n_D_downsamplings = ... # 3 for 32x32 and 4 for 64x64 # ====================================== # = custom = # ====================================== # setup the normalization function for discriminator if args.gradient_penalty_mode == 'none': d_norm = 'batch_norm' if args.gradient_penalty_mode in ['dragan', 'wgan-gp']: # cannot use batch normalization with gradient penalty # TODO(Lynn) # Layer normalization is more stable than instance normalization here, # but instance normalization works in other implementations. # Please tell me if you find out the cause. d_norm = 'layer_norm'
# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
# NOTE(review): if args.dataset matches none of the branches below,
# `data_loader`/`shape` are never assigned — presumably the argument parser
# restricts the dataset choices; confirm against the CLI definition.
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(
        args.dataset, args.batch_size, args.imb_index, args.imb_ratio, pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'imagenet':
    # ======================================
    # =               custom               =
    # ======================================
    img_paths = 'data/imagenet_small/train'
    data_loader, shape = data.make_custom_dataset(
        img_paths, args.batch_size, resize=32, pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3  # 3 for 32x32 and 4 for 64x64
    # ======================================
    # =               custom               =
    # ======================================


# ==============================================================================
# =                                   model                                    =
# ==============================================================================

# setup the normalization function for discriminator
if args.gradient_penalty_mode == 'none':
    d_norm = 'batch_norm'
else:  # cannot use batch normalization with gradient penalty
    d_norm = args.gradient_penalty_d_norm