def construct_model(self, crop_size, load_size):
    """Build datasets, generators, discriminators, losses and optimizers.

    Uses the train image paths collected in __init__ and globs the test
    splits from the dataset directory. All objects are stored on self.
    """
    # Training dataset: zipped (A, B) pairs with augmentation enabled.
    self.A_B_dataset, self.len_dataset = data.make_zip_dataset(
        self.A_img_paths, self.B_img_paths, self.batch_size,
        load_size, crop_size, training=True, repeat=False,
        is_gray_scale=(self.color_depth == 1))

    # History pools of previously generated fakes for discriminator updates.
    self.A2B_pool = data.ItemPool(self.pool_size)
    self.B2A_pool = data.ItemPool(self.pool_size)

    # Test dataset: no augmentation, repeated forever for periodic sampling.
    test_a_paths = py.glob(
        py.join(self.datasets_dir, self.dataset, 'testA'),
        '*.{}'.format(self.image_ext))
    test_b_paths = py.glob(
        py.join(self.datasets_dir, self.dataset, 'testB'),
        '*.{}'.format(self.image_ext))
    test_dataset, _ = data.make_zip_dataset(
        test_a_paths, test_b_paths, self.batch_size,
        load_size, crop_size, training=False, repeat=True,
        is_gray_scale=(self.color_depth == 1))
    self.test_iter = iter(test_dataset)

    # Two generators (A->B, B->A) and one discriminator per domain.
    generator_shape = (crop_size, crop_size, self.color_depth)
    self.G_A2B = module.ResnetGenerator(input_shape=generator_shape,
                                        output_channels=self.color_depth)
    self.G_B2A = module.ResnetGenerator(input_shape=generator_shape,
                                        output_channels=self.color_depth)
    self.D_A = module.ConvDiscriminator(input_shape=generator_shape)
    self.D_B = module.ConvDiscriminator(input_shape=generator_shape)

    # Loss functions for the chosen adversarial mode plus L1 cycle/identity.
    self.d_loss_fn, self.g_loss_fn = gan.get_adversarial_losses_fn(
        self.adversarial_loss_mode)
    self.cycle_loss_fn = tf.losses.MeanAbsoluteError()
    self.identity_loss_fn = tf.losses.MeanAbsoluteError()

    # Linearly decayed learning rates, one schedule per optimizer.
    self.G_lr_scheduler = module.LinearDecay(
        self.lr, self.epochs * self.len_dataset,
        self.epoch_decay * self.len_dataset)
    self.D_lr_scheduler = module.LinearDecay(
        self.lr, self.epochs * self.len_dataset,
        self.epoch_decay * self.len_dataset)
    self.G_optimizer = keras.optimizers.Adam(
        learning_rate=self.G_lr_scheduler, beta_1=self.beta_1)
    self.D_optimizer = keras.optimizers.Adam(
        learning_rate=self.D_lr_scheduler, beta_1=self.beta_1)
def make_dataset(img_dir, batch_size, load_size=286, crop_size=256,
                 n_channels=3, training=True, drop_remainder=True,
                 shuffle=True, repeat=1):
    """Create a batched image dataset from a directory.

    Images are resized to `load_size`, center-cropped to `crop_size`,
    and rescaled from [0, 255] to [-1, 1]. When `training` is true a
    random horizontal flip is applied before the crop.

    Returns (dataset, len_dataset) where len_dataset is the number of
    batches per epoch.
    """
    img_paths = sorted(py.glob(img_dir, '*'))
    if shuffle:
        img_paths = np.random.permutation(img_paths)

    def _map_fn(img):
        # Single preprocessing pipeline; the flip is the only
        # training-specific step.
        if n_channels == 1:
            img = tf.image.rgb_to_grayscale(img)
        img = tf.image.resize(img, [load_size, load_size])
        if training:
            img = tf.image.random_flip_left_right(img)
        img = tl.center_crop(img, size=crop_size)
        return tf.clip_by_value(img, 0, 255) / 127.5 - 1

    dataset = tl.disk_image_batch_dataset(img_paths, batch_size,
                                          drop_remainder=drop_remainder,
                                          map_fn=_map_fn,
                                          shuffle=shuffle,
                                          repeat=repeat)

    # Batches per epoch: floor when trailing partial batches are dropped,
    # ceiling otherwise.
    if drop_remainder:
        len_dataset = len(img_paths) // batch_size
    else:
        len_dataset = int(np.ceil(len(img_paths) / batch_size))
    return dataset, len_dataset
# save settings py.args_to_yaml(py.join(output_dir, 'settings.yml'), args) # ============================================================================== # = data = # ============================================================================== # setup dataset if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']: # 32x32 dataset, shape, len_dataset = data.make_32x32_dataset( args.dataset, args.batch_size) n_G_upsamplings = n_D_downsamplings = 3 elif args.dataset == 'celeba': # 64x64 img_paths = py.glob('data/img_align_celeba', '*.jpg') dataset, shape, len_dataset = data.make_celeba_dataset( img_paths, args.batch_size) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'anime': # 64x64 img_paths = py.glob('data/faces', '*.jpg') dataset, shape, len_dataset = data.make_anime_dataset( img_paths, args.batch_size) n_G_upsamplings = n_D_downsamplings = 4 elif args.dataset == 'japanese': img_paths = py.glob('./dataset/kkanji2', 'U+*') imgs = [] for dir in img_paths: list = py.glob(dir, '*.png')
# --- tail of count_edge(img): count uniform (blank) columns from each side ---
# A column is "blank" when its mean is within eps of -1 (images are in [-1, 1]).
left = 0
for i in range(img.shape[1]):
    if np.mean(img[:, i, ...]) + 1 < eps:
        left += 1
    else:
        break
right = 0
for i in range(img.shape[1] - 1, -1, -1):
    if np.mean(img[:, i, ...]) + 1 < eps:
        right += 1
    else:
        break
return up, down, left, right


def work_fn(img_name):
    # Crop the blank border of one image and write it to the save directory.
    # `img_dir`, `save_dir` and `portion` come from the enclosing scope.
    img = im.imread(img_name)
    u, d, l, r = count_edge(img)
    o = max(u, d, l, r)
    # Only crop when the border is a small fraction of the image; otherwise
    # keep the original to avoid cropping away real content.
    if o / img.shape[0] < portion:
        img = img[o:img.shape[0] - o, o:img.shape[1] - o, ...]
    # NOTE(review): the collapsed source makes it ambiguous whether this write
    # is inside the `if`; written here as always-executing so every image is
    # mirrored to save_dir — confirm against the original file.
    im.imwrite(img, img_name.replace(img_dir, save_dir))


py.mkdir(save_dir)
img_names = py.glob(img_dir, '*')
py.run_parallels(work_fn, img_names)
# Resolve the numbered run directory: a new run bumps the counter, and an
# explicit --run_id overrides it entirely.
if args.new_run:
    run_id += 1
if args.run_id is not False:
    run_id = args.run_id
output_dir = py.join(output_dir, f'{run_id:04d}')
py.mkdir(output_dir)

# Persist the effective settings next to the run outputs.
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'),
                      '*.png')
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'),
                      '*.png')

# Paired (A, B) training stream with augmentation; one pass per epoch.
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 training=True,
                                                 repeat=False)

# Fake-sample history pools (CycleGAN-style discriminator stabilization).
A2B_pool = data.ItemPool(args.pool_size)
B2A_pool = data.ItemPool(args.pool_size)

A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'),
                           '*.png')
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset: loaders pin host memory only when a GPU is present
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(args.dataset,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'celeba':  # 64x64
    img_paths = py.glob('/DATASETS/resized_celebA_128/celebA', '*.jpg')
    data_loader, shape = data.make_celeba_dataset(img_paths,
                                                  args.batch_size,
                                                  pin_memory=use_gpu)
    # NOTE(review): 5 up/downsamplings implies 128x128 outputs (and the path
    # says resized_celebA_128), but the branch comment says 64x64 — confirm.
    n_G_upsamplings = n_D_downsamplings = 5
elif args.dataset == 'anime':  # 64x64
    img_paths = py.glob('data/faces', '*.jpg')
    data_loader, shape = data.make_anime_dataset(img_paths,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'custom':
    # ======================================
    # =                custom              =
import data
import tensorflow as tf
import tensorflow.keras as keras
import module
import tf2gan as gan
import matplotlib.pyplot as plt

# adversarial_loss_functions (WGAN critic/generator losses)
d_loss_fn, g_loss_fn = gan.get_adversarial_losses_fn('wgan')

#img_paths = py.glob('data/faces', '*.jpg')
#dataset, shape, len_dataset = data.make_PETCT_dataset(img_paths, 1)
#n_G_upsamplings = n_D_downsamplings = 4

#img_paths = py.glob('data/faces', '*.jpg')
# NOTE(review): `py` is used below but not imported in this visible span —
# presumably imported earlier in the file; verify.
img_paths = py.glob('data/STS_RGB', '*.png')
dataset, shape, len_dataset = data.make_PETCT_dataset(img_paths, 5)
n_G_upsamplings = n_D_downsamplings = 4

# networks
# Comment by K.C:
# the following commands set the structure of a G model
#G = module.ConvGenerator_1(input_shape=(1, 1, args.z_dim), output_channels=shape[-1], n_upsamplings=n_G_upsamplings, name='G_%s' % args.dataset)
# NOTE(review): padding='Same' (capital S) — confirm the module accepts this
# spelling; Keras itself normally uses lowercase 'same'.
G = module.G_Split_Unet(input_shape=(shape[0], shape[1], 1),
                        name='G_padding_PETCT', padding='Same')
#G = module.G_Split_Unet( input_shape=(shape[0], shape[1],1), padding='valid', name='G_Split_Unet')
D1 = module.ConvDiscriminator_2(input_shape=(shape[0], shape[1], 1),
img3=tf.concat([img,img2],axis=2)
print('tensor ',img3.shape)
imag=img3.numpy()
image2 = Image.fromarray(imag[:,:,:3],mode='RGB')
image2.show()
image3 = Image.fromarray(imag[:,:,2:5],mode='RGB')
image3.show()
#print('imag \n',imag)
adjhfgg'''
# NOTE(review): the lines above are the tail of a triple-quoted string opened
# earlier in the file (disabled debug code, never executed); left verbatim.

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# Windows-style paths; createdata maps each image to its color counterpart.
A_imgs = py.glob(py.join('data\\tests', 'trainA'), '*.jpg')
len_dataset=len(A_imgs)
A_color=createdata(A_imgs)
A_Set=data.make_dataset( A_imgs,A_color,args.batch_size, args.load_size, args.crop_size, training=False,work=0)

B_imgs = py.glob(py.join('data\\tests', 'trainB'), '*.jpg')
B_color=createdata(B_imgs)
B_set=data.make_dataset( B_imgs,B_color,args.batch_size, args.load_size, args.crop_size, training=False,work=0)

# Materialize domain-B batches as tensors; each element is a 1-tuple.
B_lis=list(B_set.as_numpy_iterator())
B_list=[]
for b in B_lis:
    m,=b
    B_list.append(tf.convert_to_tensor(m))
args = py.args()

# output_dir: append the optional output index as a suffix.
index_suffix = '' if args.output_index == '' else '_' + str(args.output_index)
output_dir = py.join('output', args.dataset + index_suffix)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join('datasets', args.dataset, 'trainA'), '*.jpg')
B_img_paths = py.glob(py.join('datasets', args.dataset, 'trainB'), '*.jpg')

# Paired training stream with the configured augmentation preset.
A_B_dataset, len_dataset = data.make_zip_dataset(
    A_img_paths, B_img_paths, args.batch_size, args.load_size,
    args.crop_size, training=True, repeat=False,
    augmentation_preset=args.augmentation)

# History pools of generated fakes for the discriminators.
A2B_pool = data.ItemPool(args.pool_size)
B2A_pool = data.ItemPool(args.pool_size)

A_img_paths_test = py.glob(py.join('datasets', args.dataset, 'testA'), '*.jpg')
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset: loaders pin host memory only when a GPU is present
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(args.dataset,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'celeba':  # 64x64
    img_paths = py.glob('data/img_align_celeba', '*.jpg')
    data_loader, shape = data.make_celeba_dataset(img_paths,
                                                  args.batch_size,
                                                  pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'anime':  # 64x64
    img_paths = py.glob('data/faces', '*.jpg')
    data_loader, shape = data.make_anime_dataset(img_paths,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'custom':
    # ======================================
    # =                custom              =
def createdata(l):
    """Map each image path to its counterpart under data\\tests\\0.1_color\\.

    Only the basename (after the last backslash) of each input path is kept.
    """
    return ['data\\tests\\0.1_color\\' + path.split('\\')[-1] for path in l]


# ==============================================================================
# =                                    data                                    =
# ==============================================================================

B_imgs = py.glob(py.join('data\\tests', 'trainsmall'), '*.jpg')
B_color = createdata(B_imgs)
B_set = data.make_dataset(B_imgs, B_color, abatch_size, aload_size,
                          acrop_size, training=False, work=0)

# Materialize the dataset; each element is a 1-tuple holding one batch.
B_lis = list(tfds.as_numpy(B_set))
B_list = [tf.convert_to_tensor(batch) for (batch,) in B_lis]
def __init__(
        self,
        epochs=200,
        epoch_decay=100,          # epoch to start linear lr decay
        pool_size=50,             # fake-sample history pool size
        output_dir='output',
        datasets_dir="datasets",
        dataset="drawing",
        image_ext="png",
        crop_size=256,
        load_size=286,
        batch_size=0,             # 0 is a sentinel meaning "default" (currently 1)
        adversarial_loss_mode="lsgan",  # ['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan']
        lr=0.0002,
        gradient_penalty_mode='none',   # ['none', 'dragan', 'wgan-gp']
        gradient_penalty_weight=10.0,
        cycle_loss_weight=0.0,
        identity_loss_weight=0.0,
        beta_1=0.5,
        color_depth=1,            # 1 = grayscale, 3 = RGB
        progrssive=False):        # NOTE: misspelling of "progressive" kept for caller compatibility
    """Configure the trainer: logging, output dirs, settings snapshot, data paths.

    Side effects: configures logging from 'log.conf', creates the output and
    sample directories, writes settings.yml, and opens a summary writer.
    """
    logging.config.fileConfig(fname='log.conf')
    self.logger = logging.getLogger('dev')

    if batch_size == 0:
        batch_size = 1  # later figure out what to do
    # Never decay for longer than half of the total epochs.
    epoch_decay = min(epoch_decay, epochs // 2)

    self.output_dataset_dir = py.join(output_dir, dataset)
    py.mkdir(self.output_dataset_dir)

    # Snapshot the effective settings (after the sentinel/clamp fixes above).
    py.args_to_yaml(
        py.join(self.output_dataset_dir, 'settings.yml'),
        Namespace(
            epochs=epochs,
            epoch_decay=epoch_decay,
            pool_size=pool_size,
            output_dir=output_dir,
            datasets_dir=datasets_dir,
            dataset=dataset,
            image_ext=image_ext,
            crop_size=crop_size,
            load_size=load_size,
            batch_size=batch_size,
            adversarial_loss_mode=adversarial_loss_mode,  # ['gan', 'hinge_v1', 'hinge_v2', 'lsgan', 'wgan']
            lr=lr,
            gradient_penalty_mode=gradient_penalty_mode,  # ['none', 'dragan', 'wgan-gp']
            gradient_penalty_weight=gradient_penalty_weight,
            cycle_loss_weight=cycle_loss_weight,
            identity_loss_weight=identity_loss_weight,
            beta_1=beta_1,
            color_depth=color_depth,
            progressive=progrssive))  # written with the corrected spelling

    self.sample_dir = py.join(self.output_dataset_dir, 'samples_training')
    py.mkdir(self.sample_dir)

    self.epochs = epochs
    self.epoch_decay = epoch_decay
    self.pool_size = pool_size
    self.gradient_penalty_mode = gradient_penalty_mode
    self.gradient_penalty_weight = gradient_penalty_weight
    self.cycle_loss_weight = cycle_loss_weight
    self.identity_loss_weight = identity_loss_weight
    self.adversarial_loss_mode = adversarial_loss_mode
    self.batch_size = batch_size
    self.beta_1 = beta_1
    self.color_depth = color_depth  # was assigned twice in the original; deduplicated
    self.dataset = dataset
    self.datasets_dir = datasets_dir
    self.image_ext = image_ext
    self.progrssive = progrssive
    self.lr = lr
    self.crop_size = crop_size
    self.load_size = load_size

    # Training image paths for both domains.
    self.A_img_paths = py.glob(py.join(datasets_dir, dataset, 'trainA'),
                               '*.{}'.format(image_ext))
    self.B_img_paths = py.glob(py.join(datasets_dir, dataset, 'trainB'),
                               '*.{}'.format(image_ext))

    # summary
    self.train_summary_writer = tf.summary.create_file_writer(
        py.join(self.output_dataset_dir, 'summaries', 'train'))
# ==============================================================================
py.arg('--save_path', default='pics/custom_gan.gif')
py.arg('--img_dir', default='output/_gan_dragan/samples_training')
py.arg('--max_frames', type=int, default=0)
args = py.args()
py.mkdir(py.directory(args.save_path))

# ==============================================================================
# =                                 make gif                                   =
# ==============================================================================

# modified from https://www.tensorflow.org/alpha/tutorials/generative/dcgan
with imageio.get_writer(args.save_path, mode='I', fps=8) as writer:
    filenames = sorted(py.glob(args.img_dir, '*.jpg'))
    # Subsample evenly when a frame budget is given.
    step = len(filenames) // args.max_frames if args.max_frames else 1
    last = -1
    for i, filename in enumerate(filenames[::step]):
        # Nonlinear pacing: early iterations advance fast, later ones slow down.
        frame = 2 * (i ** 0.3)
        if round(frame) <= round(last):
            continue  # skip frames that don't advance the rounded timeline
        last = frame
        # Each kept frame is appended twice (as in the DCGAN tutorial),
        # effectively halving the playback speed.
        image = imageio.imread(filename)
        writer.append_data(image)
        image = imageio.imread(filename)
        writer.append_data(image)
# --- tail of the custom dataset builder (definition starts out of view) ---
                                      drop_remainder=drop_remainder,
                                      map_fn=_map_fn, shuffle=shuffle,
                                      repeat=repeat)
# Grayscale output shape; len is batches per epoch (partial batch dropped).
img_shape = (resize, resize, 1)
len_dataset = len(img_paths) // batch_size
return dataset, img_shape, len_dataset

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
dataset_path = './dataset/'+args.dataset
img_paths = py.glob(dataset_path, '*.png')  # image paths of custom dataset
# NOTE(review): `make_custom_datset` looks like a typo of make_custom_dataset,
# but the function is defined elsewhere — rename both together if fixing.
dataset, shape, len_dataset = make_custom_datset(img_paths, args.batch_size)
n_G_upsamplings = n_D_downsamplings = 4  # 3 for 32x32 and 4 for 64x64

# ==============================================================================
# =                                   model                                    =
# ==============================================================================

# setup the normalization function for discriminator
d_norm = 'layer_norm'

# networks
G = module.ConvGenerator(input_shape=(1, 1, args.z_dim),
                         output_channels=shape[-1],
                         n_upsamplings=n_G_upsamplings,
                         name='G_%s' % args.dataset)
D = module.ConvDiscriminator(input_shape=shape,
                             n_downsamplings=n_D_downsamplings,
                             norm=d_norm,
                             name='D_%s' % args.dataset)
G.summary()
py.arg("--bidirectional", type=bool, default=True)
py.arg("--pool_size", type=int, default=50)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = py.join("output", args.output_dir)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, "settings.yml"), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, "trainA"),
                      "*.jpg")
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, "trainB"),
                      "*.jpg")

# Paired (A, B) training stream; channel count is configurable here.
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 args.channels,
                                                 training=True,
                                                 repeat=False)

# History pools of generated fakes for the discriminators.
A2B_pool = data.ItemPool(args.pool_size)
B2A_pool = data.ItemPool(args.pool_size)
# ============================================================================== # = param = # ============================================================================== py.arg('--experiment_dir') py.arg('--batch_size', type=int, default=32) test_args = py.args() args = py.args_from_yaml(py.join(test_args.experiment_dir, 'settings.yml')) args.__dict__.update(test_args.__dict__) # ============================================================================== # = test = # ============================================================================== # data A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'), '*.jpg') B_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testB'), '*.jpg') A_dataset_test = data.make_dataset(A_img_paths_test, args.batch_size, args.load_size, args.crop_size, training=False, drop_remainder=False, shuffle=False, repeat=1) B_dataset_test = data.make_dataset(B_img_paths_test, args.batch_size, args.load_size, args.crop_size, training=False,
def train():
    """Full CycleGAN training loop: data, models, optimizers, checkpointed epochs."""
    # ===================================== Args =====================================
    args = parse_args()
    output_dir = os.path.join('output', args.dataset)
    os.makedirs(output_dir, exist_ok=True)
    settings_path = os.path.join(output_dir, 'settings.json')
    pylib.args_to_json(settings_path, args)

    # ===================================== Data =====================================
    A_img_paths = pylib.glob(
        os.path.join(args.datasets_dir, args.dataset, 'trainA'), '*.png')
    B_img_paths = pylib.glob(
        os.path.join(args.datasets_dir, args.dataset, 'trainB'), '*.png')
    print(f'len(A_img_paths) = {len(A_img_paths)}')
    print(f'len(B_img_paths) = {len(B_img_paths)}')
    # Non-square sizes are supported via [height, width] lists.
    load_size = [args.load_size_height, args.load_size_width]
    crop_size = [args.crop_size_height, args.crop_size_width]
    A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                     B_img_paths,
                                                     args.batch_size,
                                                     load_size,
                                                     crop_size,
                                                     training=True,
                                                     repeat=False)
    # History pools of previously generated fakes for discriminator updates.
    A2B_pool = data.ItemPool(args.pool_size)
    B2A_pool = data.ItemPool(args.pool_size)
    A_img_paths_test = pylib.glob(
        os.path.join(args.datasets_dir, args.dataset, 'testA'), '*.png')
    B_img_paths_test = pylib.glob(
        os.path.join(args.datasets_dir, args.dataset, 'testB'), '*.png')
    A_B_dataset_test, _ = data.make_zip_dataset(A_img_paths_test,
                                                B_img_paths_test,
                                                args.batch_size,
                                                load_size,
                                                crop_size,
                                                training=False,
                                                repeat=True)

    # ===================================== Models =====================================
    model_input_shape = crop_size + [
        3
    ]  # [args.crop_size_height, args.crop_size_width, 3]
    G_A2B = module.ResnetGenerator(input_shape=model_input_shape, n_blocks=6)
    G_B2A = module.ResnetGenerator(input_shape=model_input_shape, n_blocks=6)
    D_A = module.ConvDiscriminator(input_shape=model_input_shape)
    D_B = module.ConvDiscriminator(input_shape=model_input_shape)

    d_loss_fn, g_loss_fn = tf2gan.get_adversarial_losses_fn(
        args.adversarial_loss_mode)
    cycle_loss_fn = tf.losses.MeanAbsoluteError()
    identity_loss_fn = tf.losses.MeanAbsoluteError()

    # Linearly decayed learning rates, one schedule per optimizer.
    G_lr_scheduler = module.LinearDecay(args.lr, args.epochs * len_dataset,
                                        args.epoch_decay * len_dataset)
    D_lr_scheduler = module.LinearDecay(args.lr, args.epochs * len_dataset,
                                        args.epoch_decay * len_dataset)
    G_optimizer = tf.keras.optimizers.Adam(learning_rate=G_lr_scheduler,
                                           beta_1=args.beta_1)
    D_optimizer = tf.keras.optimizers.Adam(learning_rate=D_lr_scheduler,
                                           beta_1=args.beta_1)

    # ===================================== Training steps =====================================
    @tf.function
    def train_generators(A, B):
        # One joint update of both generators: adversarial + cycle + identity.
        with tf.GradientTape() as t:
            A2B = G_A2B(A, training=True)
            B2A = G_B2A(B, training=True)
            A2B2A = G_B2A(A2B, training=True)
            B2A2B = G_A2B(B2A, training=True)
            A2A = G_B2A(A, training=True)
            B2B = G_A2B(B, training=True)

            A2B_d_logits = D_B(A2B, training=True)
            B2A_d_logits = D_A(B2A, training=True)

            A2B_g_loss = g_loss_fn(A2B_d_logits)
            B2A_g_loss = g_loss_fn(B2A_d_logits)
            A2B2A_cycle_loss = cycle_loss_fn(A, A2B2A)
            B2A2B_cycle_loss = cycle_loss_fn(B, B2A2B)
            A2A_id_loss = identity_loss_fn(A, A2A)
            B2B_id_loss = identity_loss_fn(B, B2B)

            G_loss = (A2B_g_loss + B2A_g_loss) + (
                A2B2A_cycle_loss + B2A2B_cycle_loss) * args.cycle_loss_weight + (
                    A2A_id_loss + B2B_id_loss) * args.identity_loss_weight

        G_grad = t.gradient(
            G_loss, G_A2B.trainable_variables + G_B2A.trainable_variables)
        G_optimizer.apply_gradients(
            zip(G_grad, G_A2B.trainable_variables + G_B2A.trainable_variables))

        return A2B, B2A, {
            'A2B_g_loss': A2B_g_loss,
            'B2A_g_loss': B2A_g_loss,
            'A2B2A_cycle_loss': A2B2A_cycle_loss,
            'B2A2B_cycle_loss': B2A2B_cycle_loss,
            'A2A_id_loss': A2A_id_loss,
            'B2B_id_loss': B2B_id_loss
        }

    @tf.function
    def train_discriminators(A, B, A2B, B2A):
        # One joint update of both discriminators on real and pooled fakes.
        with tf.GradientTape() as t:
            A_d_logits = D_A(A, training=True)
            B2A_d_logits = D_A(B2A, training=True)
            B_d_logits = D_B(B, training=True)
            A2B_d_logits = D_B(A2B, training=True)

            A_d_loss, B2A_d_loss = d_loss_fn(A_d_logits, B2A_d_logits)
            B_d_loss, A2B_d_loss = d_loss_fn(B_d_logits, A2B_d_logits)
            D_A_gp = tf2gan.gradient_penalty(functools.partial(D_A,
                                                               training=True),
                                             A, B2A,
                                             mode=args.gradient_penalty_mode)
            D_B_gp = tf2gan.gradient_penalty(functools.partial(D_B,
                                                               training=True),
                                             B, A2B,
                                             mode=args.gradient_penalty_mode)

            D_loss = (A_d_loss + B2A_d_loss) + (B_d_loss + A2B_d_loss) + (
                D_A_gp + D_B_gp) * args.gradient_penalty_weight

        D_grad = t.gradient(D_loss,
                            D_A.trainable_variables + D_B.trainable_variables)
        D_optimizer.apply_gradients(
            zip(D_grad, D_A.trainable_variables + D_B.trainable_variables))

        return {
            'A_d_loss': A_d_loss + B2A_d_loss,
            'B_d_loss': B_d_loss + A2B_d_loss,
            'D_A_gp': D_A_gp,
            'D_B_gp': D_B_gp
        }

    def train_step(A, B):
        A2B, B2A, G_loss_dict = train_generators(A, B)

        # cannot autograph `A2B_pool`
        A2B = A2B_pool(
            A2B)  # or A2B = A2B_pool(A2B.numpy()), but it is much slower
        B2A = B2A_pool(B2A)  # because of the communication between CPU and GPU

        D_loss_dict = train_discriminators(A, B, A2B, B2A)

        return G_loss_dict, D_loss_dict

    @tf.function
    def sample(A, B):
        # Inference-mode translations for periodic visual samples.
        A2B = G_A2B(A, training=False)
        B2A = G_B2A(B, training=False)
        A2B2A = G_B2A(A2B, training=False)
        B2A2B = G_A2B(B2A, training=False)
        return A2B, B2A, A2B2A, B2A2B

    # ===================================== Runner code =====================================
    # epoch counter (persisted in the checkpoint so runs can resume)
    ep_cnt = tf.Variable(initial_value=0, trainable=False, dtype=tf.int64)

    # checkpoint
    checkpoint = tf2lib.Checkpoint(dict(G_A2B=G_A2B,
                                        G_B2A=G_B2A,
                                        D_A=D_A,
                                        D_B=D_B,
                                        G_optimizer=G_optimizer,
                                        D_optimizer=D_optimizer,
                                        ep_cnt=ep_cnt),
                                   os.path.join(output_dir, 'checkpoints'),
                                   max_to_keep=5)
    try:  # restore checkpoint including the epoch counter
        checkpoint.restore().assert_existing_objects_matched()
    except Exception as e:
        # A fresh run has no checkpoint; log and continue from scratch.
        print(e)

    # summary
    train_summary_writer = tf.summary.create_file_writer(
        os.path.join(output_dir, 'summaries', 'train'))

    # sample
    test_iter = iter(A_B_dataset_test)
    sample_dir = os.path.join(output_dir, 'samples_training')
    os.makedirs(sample_dir, exist_ok=True)

    # main loop
    with train_summary_writer.as_default():
        for ep in tqdm.trange(args.epochs, desc='Epoch Loop'):
            # Skip epochs already completed by a restored run.
            if ep < ep_cnt:
                continue

            # update epoch counter
            ep_cnt.assign_add(1)

            # train for an epoch
            for A, B in tqdm.tqdm(A_B_dataset,
                                  desc='Inner Epoch Loop',
                                  total=len_dataset):
                G_loss_dict, D_loss_dict = train_step(A, B)

                # # summary
                tf2lib.summary(G_loss_dict,
                               step=G_optimizer.iterations,
                               name='G_losses')
                tf2lib.summary(D_loss_dict,
                               step=G_optimizer.iterations,
                               name='D_losses')
                tf2lib.summary(
                    {'learning rate': G_lr_scheduler.current_learning_rate},
                    step=G_optimizer.iterations,
                    name='learning rate')

                # sample every 100 generator steps
                if G_optimizer.iterations.numpy() % 100 == 0:
                    A, B = next(test_iter)
                    A2B, B2A, A2B2A, B2A2B = sample(A, B)
                    img = imlib.immerge(np.concatenate(
                        [A, A2B, A2B2A, B, B2A, B2A2B], axis=0),
                                        n_rows=6)
                    imlib.imwrite(
                        img,
                        os.path.join(
                            sample_dir, 'iter-%09d.jpg' %
                            G_optimizer.iterations.numpy()))

            # save checkpoint
            checkpoint.save(ep)
# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset: each known dataset fixes the network depth as well
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset,
                                                          args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'celeba':  # 64x64
    img_paths = py.glob('data/img_align_celeba', '*.jpg')
    dataset, shape, len_dataset = data.make_celeba_dataset(img_paths,
                                                           args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'anime':  # 64x64
    img_paths = py.glob('data/faces', '*.jpg')
    dataset, shape, len_dataset = data.make_anime_dataset(img_paths,
                                                          args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 4
elif args.dataset == 'custom':
    # ======================================
    # =                custom              =
    # ======================================
    # Template branch: fill in the paths and depth for your own dataset.
    img_paths = ...  # image paths of custom dataset
    dataset, shape, len_dataset = data.make_custom_dataset(img_paths,
                                                           args.batch_size)
    n_G_upsamplings = n_D_downsamplings = ...  # 3 for 32x32 and 4 for 64x64
py.arg('--identity_loss_weight', type=float, default=0.0)
py.arg('--pool_size', type=int, default=50)  # pool size to store fake samples
args = py.args()

# output_dir
output_dir = py.join('output', args.dataset)
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

A_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainA'),
                      '*.jpg')  # Horse dataset
B_img_paths = py.glob(py.join(args.datasets_dir, args.dataset, 'trainB'),
                      '*.jpg')  # Zebra Dataset

# Paired (A, B) training stream with augmentation; one pass per epoch.
A_B_dataset, len_dataset = data.make_zip_dataset(A_img_paths,
                                                 B_img_paths,
                                                 args.batch_size,
                                                 args.load_size,
                                                 args.crop_size,
                                                 training=True,
                                                 repeat=False)

# History pools of generated fakes for the discriminators.
A2B_pool = data.ItemPool(args.pool_size)
B2A_pool = data.ItemPool(args.pool_size)

A_img_paths_test = py.glob(py.join(args.datasets_dir, args.dataset, 'testA'),
                           '*.jpg')