def test_step(model, config, inputs, logger=None):
    """Reconstruction test step during training."""
    outputs = inputs.clone().detach()
    with torch.no_grad():
        (preds, priors, posteriors), stored_vars = model(inputs, config, False)

    # Select targets: the frames after the context window
    targets = outputs[:, config['n_ctx']:]

    # Compute the reconstruction and prior losses
    loss_rec = losses.reconstruction_loss(config, preds, targets)
    if config['beta'] > 0:
        loss_prior = losses.kl_loss(config, priors, posteriors)
        loss = loss_rec + config['beta'] * loss_prior
    else:
        loss = loss_rec

    # Logs
    if logger is not None:
        logger.scalar('test_loss_rec', loss_rec.item())
        logger.scalar('test_loss', loss.item())
        if config['beta'] > 0:
            logger.scalar('test_loss_prior', loss_prior.item())
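
# --- Example (not part of the original code): a minimal sketch of how
# `test_step` might be driven over a validation set. `test_loader` and
# `device` are assumptions introduced here for illustration only.
def run_validation(model, config, test_loader, device, logger=None):
    """Hypothetical helper: run `test_step` over every validation batch."""
    model.eval()
    for inputs in test_loader:
        test_step(model, config, inputs.to(device), logger=logger)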
def train_step(model, config, inputs, optimizer, batch_idx, logger=None):
    """Training step for the model."""
    outputs = inputs.clone().detach()

    # Forward pass
    (preds, priors, posteriors), stored_vars = model(inputs, config, False)

    # Select targets: the frames after the context window
    targets = outputs[:, config['n_ctx']:]

    # Compute the reconstruction loss
    loss_rec = losses.reconstruction_loss(config, preds, targets)

    # Compute the prior loss
    if config['beta'] > 0:
        loss_prior = losses.kl_loss(config, priors, posteriors)
        loss = loss_rec + config['beta'] * loss_prior
    else:
        loss_prior = 0.
        loss = loss_rec

    # Backward pass and optimizer step
    optimizer.zero_grad()
    if config['apex']:
        from apex import amp
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()
    optimizer.step()

    # Logs
    if logger is not None:
        logger.scalar('train_loss_rec', loss_rec.item())
        logger.scalar('train_loss', loss.item())
        if config['beta'] > 0:
            logger.scalar('train_loss_prior', loss_prior.item())

    return preds, targets, priors, posteriors, loss_rec, loss_prior, loss, stored_vars
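
# --- Example (not part of the original code): a sketch of the outer loop that
# would call `train_step` once per batch. `train_loader`, `device`, and the
# `n_epochs` config key are assumptions for illustration.
def run_training(model, config, train_loader, optimizer, device, logger=None):
    """Hypothetical helper: one call to `train_step` per batch, per epoch."""
    for epoch in range(config['n_epochs']):
        model.train()
        for batch_idx, inputs in enumerate(train_loader):
            train_step(model, config, inputs.to(device), optimizer,
                       batch_idx, logger=logger)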
def train(model_dir, gpu_id, lr, n_iterations, alpha, image_sigma, model_save_iter, batch_size=1):
    """
    Model training function.

    :param model_dir: model folder to save to
    :param gpu_id: integer specifying the gpu to use
    :param lr: learning rate
    :param n_iterations: number of training iterations
    :param alpha: the scalar in front of the smoothing Laplacian, as in the MICCAI paper
    :param image_sigma: the image sigma in the MICCAI paper
    :param model_save_iter: frequency with which to save models
    :param batch_size: optional, default of 1; can be larger, depending on GPU memory and volume size
    """
    # prepare model folder
    if not os.path.isdir(model_dir):
        os.mkdir(model_dir)
    print(model_dir)

    # gpu handling
    gpu = '/gpu:' + str(gpu_id)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    set_session(tf.Session(config=config))

    # diffeomorphic network architecture used in the MICCAI 2018 paper
    nf_enc = [16, 32, 32, 32]
    nf_dec = [32, 32, 32, 32, 16, 3]

    # prepare the model
    # in the CVPR layout, the model takes in [image_1, image_2] and outputs
    # [warped_image_1, velocity_stats]; in the experiments, image_2 is the atlas.
    # vol_size, train_vol_names, and atlas_vol are expected to be defined at module scope.
    with tf.device(gpu):
        # miccai 2018 used xy indexing
        model = networks.miccai2018_net(vol_size, nf_enc, nf_dec, use_miccai_int=True, indexing='xy')

        # compile
        model_losses = [losses.kl_l2loss(image_sigma), losses.kl_loss(alpha)]
        model.compile(optimizer=Adam(lr=lr), loss=model_losses)

        # save first iteration
        model.save(os.path.join(model_dir, str(0) + '.h5'))

    train_example_gen = datagenerators.example_gen(train_vol_names)
    zeros = np.zeros((1, *vol_size, 3))

    # train. Note: we use train_on_batch and design our own print function, as this has enabled
    # faster development and debugging, but one could also use fit_generator and Keras callbacks.
    for step in range(1, n_iterations):
        # get data
        X = next(train_example_gen)[0]

        # train
        with tf.device(gpu):
            train_loss = model.train_on_batch([X, atlas_vol], [atlas_vol, zeros])
        if not isinstance(train_loss, list):
            train_loss = [train_loss]

        # print
        print_loss(step, 0, train_loss)

        # save model
        with tf.device(gpu):
            if (step % model_save_iter == 0) or step < 10:
                model.save(os.path.join(model_dir, str(step) + '.h5'))
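
# --- Example (not part of the original code): a sketch of a CLI wrapper
# around `train`. Flag names mirror the function signature; the default
# values are placeholders, not the paper's hyperparameters.
def main():
    """Hypothetical command-line entry point for `train`."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="../models/")
    parser.add_argument("--gpu_id", type=int, default=0)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--n_iterations", type=int, default=150000)
    parser.add_argument("--alpha", type=float, default=1.0)
    parser.add_argument("--image_sigma", type=float, default=0.02)
    parser.add_argument("--model_save_iter", type=int, default=5000)
    args = parser.parse_args()

    train(**vars(args))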
def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
    net_g, net_d = nets
    optim_g, optim_d = optims
    scheduler_g, scheduler_d = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    for batch_idx, (spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
        spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True)
        y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)

        with autocast(enabled=hps.train.fp16_run):
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax)
            y_hat, ids_slice, x_mask, z_mask, \
                (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(mel, spec_lengths, spec, spec_lengths)

            y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
            y_hat_mel = mel_spectrogram_torch(
                y_hat.squeeze(1),
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
            y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice

            # Negative data augmentation (NDA): build a "jigsaw" negative by
            # shuffling four chunks of the real waveform slice
            # (assumes the segment length is divisible by 4)
            batch_size = y.size(0)
            y_jig1 = y.view(batch_size, 4, -1)
            rand_idx = torch.randperm(4)
            y_jig2 = y_jig1[:, rand_idx, :]
            y_jigsaw = y_jig2.view(batch_size, 1, -1)
            # if the permutation happens to be the identity, fall back to the generated sample
            check_idx = torch.tensor([0, 1, 2, 3])
            if (rand_idx == check_idx).sum() == 4:
                y_jigsaw = y_hat
            y_negative = 0.75 * y_hat + 0.25 * y_jigsaw

            # Discriminator
            y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_negative.detach())
            with autocast(enabled=False):
                loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
                loss_disc_all = loss_disc
        optim_d.zero_grad()
        scaler.scale(loss_disc_all).backward()
        scaler.unscale_(optim_d)
        grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
        scaler.step(optim_d)

        with autocast(enabled=hps.train.fp16_run):
            # Generator
            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
            with autocast(enabled=False):
                loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
                loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
                loss_fm = feature_loss(fmap_r, fmap_g)
                loss_gen, losses_gen = generator_loss(y_d_hat_g)
                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
        optim_g.zero_grad()
        scaler.scale(loss_gen_all).backward()
        scaler.unscale_(optim_g)
        grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
        scaler.step(optim_g)
        scaler.update()

        if rank == 0:
            if global_step % hps.train.log_interval == 0:
                lr = optim_g.param_groups[0]['lr']
                losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
                logger.info('Train Epoch: {} [{:.0f}%]'.format(
                    epoch,
                    100. * batch_idx / len(train_loader)))
                logger.info([x.item() for x in losses] + [global_step, lr])

                scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all,
                               "learning_rate": lr, "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
                scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl})
                scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
                scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
                scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
                image_dict = {
                    "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
                    "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
                    "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
                }
                utils.summarize(
                    writer=writer,
                    global_step=global_step,
                    images=image_dict,
                    scalars=scalar_dict)

            if global_step % hps.train.eval_interval == 0:
                evaluate(hps, net_g, eval_loader, writer_eval)
                utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
                                      os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
                utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
                                      os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
        global_step += 1

    if rank == 0:
        logger.info('====> Epoch: {}'.format(epoch))
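
# --- Example (not part of the original code): a sketch of the per-process
# driver that would call `train_and_evaluate` once per epoch. Construction of
# nets/optims/schedulers/loaders and the distributed spawn are assumed to
# happen elsewhere; `epoch_start`, `writer`, and `writer_eval` are
# illustrative names.
def run(rank, hps, epoch_start, nets, optims, schedulers, scaler, loaders,
        logger, writer, writer_eval):
    """Hypothetical per-rank driver loop."""
    scheduler_g, scheduler_d = schedulers
    for epoch in range(epoch_start, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(rank, epoch, hps, nets, optims, schedulers,
                               scaler, loaders, logger, (writer, writer_eval))
        else:
            train_and_evaluate(rank, epoch, hps, nets, optims, schedulers,
                               scaler, loaders, logger, None)
        # step the LR schedulers once per epoch, after training
        scheduler_g.step()
        scheduler_d.step()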