def main(args):
    # do not track the lambda param, it can be changed after training
    exp = Experiment(args, ignore=('lambda_',))
    print(exp)

    if exp.found:
        print('Already exists: SKIPPING')
        exit(0)

    np.random.seed(args.seed)
    tf.random.set_seed(args.seed)

    # get data
    train_dataset = get_train_data(args.category,
                                   image_size=args.image_size,
                                   patch_size=args.patch_size,
                                   batch_size=args.batch_size,
                                   n_batches=args.n_batches,
                                   rotation_range=args.rotation_range,
                                   seed=args.seed)

    test_dataset, test_labels = get_test_data(args.category,
                                              image_size=args.image_size,
                                              patch_size=args.patch_size,
                                              batch_size=args.batch_size)

    is_object = args.category in objects

    # build models
    generator = make_generator(args.latent_size,
                               channels=args.channels,
                               upsample_first=is_object,
                               upsample_type=args.ge_up,
                               bn=args.ge_bn,
                               act=args.ge_act)
    encoder = make_encoder(args.patch_size,
                           args.latent_size,
                           channels=args.channels,
                           bn=args.ge_bn,
                           act=args.ge_act)
    discriminator = make_discriminator(args.patch_size,
                                       args.latent_size,
                                       channels=args.channels,
                                       bn=args.d_bn,
                                       act=args.d_act)

    # feature extractor model for evaluation
    discriminator_features = get_discriminator_features_model(discriminator)

    # build optimizers
    generator_encoder_optimizer = O.Adam(args.lr,
                                         beta_1=args.ge_beta1,
                                         beta_2=args.ge_beta2)
    discriminator_optimizer = O.Adam(args.lr,
                                     beta_1=args.d_beta1,
                                     beta_2=args.d_beta2)

    # reference to the models to use in eval
    generator_eval = generator
    encoder_eval = encoder

    # for smoothing generator and encoder evolution
    if args.ge_decay > 0:
        ema = tf.train.ExponentialMovingAverage(decay=args.ge_decay)
        generator_ema = tf.keras.models.clone_model(generator)
        encoder_ema = tf.keras.models.clone_model(encoder)
        generator_eval = generator_ema
        encoder_eval = encoder_ema

    # checkpointer
    checkpoint = tf.train.Checkpoint(
        generator=generator,
        encoder=encoder,
        discriminator=discriminator,
        generator_encoder_optimizer=generator_encoder_optimizer,
        discriminator_optimizer=discriminator_optimizer)
    best_ckpt_path = exp.ckpt(f'ckpt_{args.category}_best')
    last_ckpt_path = exp.ckpt(f'ckpt_{args.category}_last')

    # log stuff
    log, log_file = exp.require_csv(f'log_{args.category}.csv.gz')
    metrics, metrics_file = exp.require_csv(f'metrics_{args.category}.csv')

    best_metric = 0.
    best_recon = float('inf')
    best_recon_file = exp.path_to(f'best_recon_{args.category}.png')
    last_recon_file = exp.path_to(f'last_recon_{args.category}.png')

    # animate generation during training
    n_preview = 6
    train_batch = next(iter(train_dataset))[:n_preview]
    test_batch = next(iter(test_dataset))[0][:n_preview]
    latent_batch = tf.random.normal([n_preview, args.latent_size])

    if not is_object:  # take random patches from test images
        patch_location = np.random.randint(0, args.image_size - args.patch_size,
                                           (n_preview, 2))
        test_batch = [
            x[i:i + args.patch_size, j:j + args.patch_size, :]
            for x, (i, j) in zip(test_batch, patch_location)
        ]
        test_batch = K.stack(test_batch)

    video_out = exp.path_to(f'{args.category}.mp4')
    video_options = dict(fps=30, codec='libx265', quality=4)  # see imageio FFMPEG options
    video_saver = VideoSaver(train_batch, test_batch, latent_batch, video_out,
                             **video_options)
    video_saver.generate_and_save(generator, encoder)

    # train loop
    progress = tqdm(train_dataset, desc=args.category, dynamic_ncols=True)
    try:
        for step, image_batch in enumerate(progress, start=1):
            if step == 1 or args.d_iter == 0:  # only for JIT compilation (tf.function) to work
                d_train = True
                ge_train = True
            elif args.d_iter:
                n_iter = step % (abs(args.d_iter) + 1)  # can be in [0, d_iter]
                d_train = (n_iter != 0) if (args.d_iter > 0) else (n_iter == 0)  # True in [1, d_iter]
                ge_train = not d_train  # True when step == d_iter + 1
            else:  # d_iter == None: dynamic adjustment
                d_train = (scores['fake_score'] > 0) or (scores['real_score'] < 0)
                ge_train = (scores['real_score'] > 0) or (scores['fake_score'] < 0)

            losses, scores = train_step(image_batch,
                                        generator,
                                        encoder,
                                        discriminator,
                                        generator_encoder_optimizer,
                                        discriminator_optimizer,
                                        d_train,
                                        ge_train,
                                        alpha=args.alpha,
                                        gp_weight=args.gp_weight)

            if (args.ge_decay > 0) and (step % 10 == 0):
                ge_vars = generator.variables + encoder.variables
                ema.apply(ge_vars)  # update exponential moving average

            # tensor to numpy
            losses = {n: l.numpy() if l is not None else l for n, l in losses.items()}
            scores = {n: s.numpy() if s is not None else s for n, s in scores.items()}

            # log step metrics
            entry = {'step': step, 'timestamp': pd.to_datetime('now'), **losses, **scores}
            log = log.append(entry, ignore_index=True)

            if step % 100 == 0:
                if args.ge_decay > 0:
                    ge_ema_vars = generator_ema.variables + encoder_ema.variables
                    for v_ema, v in zip(ge_ema_vars, ge_vars):
                        v_ema.assign(ema.average(v))

                preview = video_saver.generate_and_save(generator_eval, encoder_eval)

            if step % 1000 == 0:
                log.to_csv(log_file, index=False)
                checkpoint.write(file_prefix=last_ckpt_path)

                auc, balanced_accuracy = evaluate(generator_eval,
                                                  encoder_eval,
                                                  discriminator_features,
                                                  test_dataset,
                                                  test_labels,
                                                  patch_size=args.patch_size,
                                                  lambda_=args.lambda_)
                entry = {'step': step, 'auc': auc, 'balanced_accuracy': balanced_accuracy}
                metrics = metrics.append(entry, ignore_index=True)
                metrics.to_csv(metrics_file, index=False)

                if auc > best_metric:
                    best_metric = auc
                    checkpoint.write(file_prefix=best_ckpt_path)

                # save last image to inspect it during training
                imageio.imwrite(last_recon_file, preview)

                recon = losses['images_reconstruction_loss']
                if recon < best_recon:
                    best_recon = recon
                    imageio.imwrite(best_recon_file, preview)

                progress.set_postfix({
                    'AUC': f'{auc:.1%}',
                    'BalAcc': f'{balanced_accuracy:.1%}',
                    'BestAUC': f'{best_metric:.1%}',
                })

    except KeyboardInterrupt:
        checkpoint.write(file_prefix=last_ckpt_path)
    finally:
        log.to_csv(log_file, index=False)
        video_saver.close()

    # score the test set
    checkpoint.read(best_ckpt_path)
    auc, balanced_accuracy = evaluate(generator,
                                      encoder,
                                      discriminator_features,
                                      test_dataset,
                                      test_labels,
                                      patch_size=args.patch_size,
                                      lambda_=args.lambda_)
    print(f'{args.category}: AUC={auc}, BalAcc={balanced_accuracy}')
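
# --- Illustrative sketch (not part of the original script) -----------------
# A minimal, self-contained example of the EMA smoothing pattern used above
# for the evaluation copies of the generator and encoder: shadow averages are
# updated every few steps and copied into a cloned model before evaluation.
# The toy model, decay value, and update frequency are assumptions made for
# illustration only.
def ema_smoothing_sketch():
    import tensorflow as tf

    model = tf.keras.Sequential([tf.keras.Input(shape=(4,)),
                                 tf.keras.layers.Dense(8)])
    model_ema = tf.keras.models.clone_model(model)  # same architecture, fresh weights
    model_ema.build((None, 4))                      # make sure variables exist

    ema = tf.train.ExponentialMovingAverage(decay=0.999)

    for step in range(1, 101):
        # ... optimizer updates to `model` would happen here ...
        if step % 10 == 0:
            ema.apply(model.variables)  # create/update the shadow (averaged) variables

    # copy the smoothed weights into the evaluation model,
    # as done every 100 steps in the training loop above
    for v_ema, v in zip(model_ema.variables, model.variables):
        v_ema.assign(ema.average(v))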
def main(args):
    root = 'runs_' + args.dataset
    exp = Experiment(args, root=root, main='model',
                     ignore=('cuda', 'device', 'epochs', 'resume'))
    print(exp)

    if os.path.exists(exp.path_to('log')) and not args.resume:
        print('Skipping ...')
        sys.exit(0)

    train_data, test_data, in_ch, out = load_dataset(args)
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)

    if args.model == 'odenet':
        model = ODENet(in_ch, out=out, n_filters=args.filters,
                       downsample=args.downsample, method=args.method,
                       tol=args.tol, adjoint=args.adjoint, dropout=args.dropout)
    else:
        model = ResNet(in_ch, out=out, n_filters=args.filters,
                       downsample=args.downsample, dropout=args.dropout)

    model = model.to(args.device)

    if args.optim == 'sgd':
        optimizer = SGD(model.parameters(), lr=args.lr, momentum=0.9,
                        weight_decay=args.wd)
    elif args.optim == 'adam':
        optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)

    # print(train_data)
    # print(test_data)
    # print(model)
    # print(optimizer)

    if args.resume:
        ckpt = torch.load(exp.ckpt('last'))
        print('Loaded: {}'.format(exp.ckpt('last')))
        model.load_state_dict(ckpt['model'])
        optimizer.load_state_dict(ckpt['optim'])
        start_epoch = ckpt['epoch'] + 1
        best_accuracy = exp.log['test_acc'].max()
        print('Resuming from epoch {}: {}'.format(start_epoch, exp.name))
    else:
        metrics = evaluate(test_loader, model, args)
        best_accuracy = metrics['test_acc']
        start_epoch = 1

    if args.lrschedule == 'fixed':
        scheduler = LambdaLR(optimizer, lr_lambda=lambda x: 1)  # no-op scheduler, just for cleaner code
    elif args.lrschedule == 'plateau':
        scheduler = ReduceLROnPlateau(optimizer, mode='max', patience=args.patience)
    elif args.lrschedule == 'cosine':
        scheduler = CosineAnnealingLR(optimizer, args.lrcycle, last_epoch=start_epoch - 2)

    progress = trange(start_epoch, args.epochs + 1, initial=start_epoch, total=args.epochs)
    for epoch in progress:
        metrics = {'epoch': epoch}
        progress.set_postfix({'Best ACC': f'{best_accuracy:.2%}'})

        progress.set_description('TRAIN')
        train_metrics = train(train_loader, model, optimizer, args)

        progress.set_description('EVAL')
        test_metrics = evaluate(test_loader, model, args)

        is_best = test_metrics['test_acc'] > best_accuracy
        best_accuracy = max(test_metrics['test_acc'], best_accuracy)

        metrics.update(train_metrics)
        metrics.update(test_metrics)

        save_checkpoint(exp, {
            'epoch': epoch,
            'params': vars(args),
            'model': model.state_dict(),
            'optim': optimizer.state_dict(),
            'metrics': metrics
        }, is_best)

        exp.push_log(metrics)

        sched_args = metrics['test_acc'] if args.lrschedule == 'plateau' else None
        scheduler.step(sched_args)
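
# --- Illustrative sketch (not part of the original script) -----------------
# A minimal example of the scheduler pattern used above: a LambdaLR with a
# constant factor of 1 acts as a no-op, so the epoch loop can always call
# scheduler.step(), passing a validation metric only when ReduceLROnPlateau
# is in use. The model, optimizer, metric value, and epoch count are toy
# assumptions, not values from the original script.
def lr_schedule_sketch(use_plateau=False):
    import torch
    from torch.optim import SGD
    from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau

    model = torch.nn.Linear(4, 2)
    optimizer = SGD(model.parameters(), lr=0.1)

    if use_plateau:
        scheduler = ReduceLROnPlateau(optimizer, mode='max', patience=5)
    else:
        scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1)  # no-op

    for epoch in range(3):
        # ... a real training epoch would run here ...
        optimizer.step()   # stand-in for the per-batch optimizer updates
        val_acc = 0.5      # stand-in for evaluate(...)['test_acc']
        scheduler.step(val_acc if use_plateau else None)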