# NOTE(review): this line is a whitespace-mangled paste — an entire chunk of a
# training loop collapsed onto one physical line (newlines lost). Because the
# line begins with '#', Python treats ALL of it as a comment; none of this code
# currently executes. It needs to be re-split into its original multi-line form.
#
# What the collapsed text visibly contains (in order):
#   - trainer.run_discriminator_one_step(data_i)    — discriminator update
#   - iter_counter.needs_printing()  -> print/plot losses via visualizer
#   - iter_counter.needs_displaying() -> build OrderedDict of
#     ('input_label', 'synthesized_image', 'real_image') and display it
#   - iter_counter.needs_saving()    -> trainer.save('latest') + record iter
#   - trainer.update_learning_rate(epoch); iter_counter.record_epoch_end()
#   - a trailing `if epoch % opt.save_epoch_freq == 0 or epoch ==
#     iter_counter.total_epochs:` whose body is CUT OFF — the fragment is
#     truncated here; the missing body (presumably an epoch checkpoint save —
#     TODO confirm against the original file) must be restored when unmangling.
#
# NOTE(review): the fragment also starts mid-loop-body — the enclosing epoch/
# iteration loop headers are not present here, so indentation levels cannot be
# recovered from this line alone; verify against the complete original source.
# train discriminator trainer.run_discriminator_one_step(data_i) # Visualizations if iter_counter.needs_printing(): losses = trainer.get_latest_losses() visualizer.print_current_errors(epoch, iter_counter.epoch_iter, losses, iter_counter.time_per_iter) visualizer.plot_current_errors(losses, iter_counter.total_steps_so_far) if iter_counter.needs_displaying(): visuals = OrderedDict([('input_label', data_i['label']), ('synthesized_image', trainer.get_latest_generated()), ('real_image', data_i['image'])]) visualizer.display_current_results(visuals, epoch, iter_counter.total_steps_so_far) if iter_counter.needs_saving(): print('saving the latest model (epoch %d, total_steps %d)' % (epoch, iter_counter.total_steps_so_far)) trainer.save('latest') iter_counter.record_current_iter() trainer.update_learning_rate(epoch) iter_counter.record_epoch_end() if epoch % opt.save_epoch_freq == 0 or \ epoch == iter_counter.total_epochs:
# NOTE(review): like the line above, this is a whitespace-mangled paste — a
# training-loop chunk collapsed onto one physical line. Since it begins with
# '#', Python parses the entire line as a comment; none of it executes. It must
# be re-split into multi-line form to be usable.
#
# What the collapsed text visibly contains (in order):
#   - visualizer = Visualizer(opt)                  — visualization helper
#   - `for epoch in iter_counter.training_epochs():` with
#     `for i, data in enumerate(dataloader, start=iter_counter.epoch_iter):`
#   - per-iteration: reset trainer.g_losses/d_losses to {},
#     CT, MR = data['CT'].squeeze(1), data['MR'].squeeze(1)
#     (presumably squeezing a singleton channel/slice dim — TODO confirm the
#     dataloader's tensor shapes)
#   - `if opt.D_steps_per_G:` guarding BOTH run_generator_one_step(CT, MR) and
#     run_discriminator_one_step(CT, MR)
#     NOTE(review): this truth-test merely skips both steps when the option is
#     0/unset; the conventional pattern alternates via
#     `i % opt.D_steps_per_G == 0` — looks like a possible bug, verify intent.
#   - data = {**data, **trainer.get_latest_generated()} — merge generated
#     outputs into the batch dict for display
#   - printing/plot-losses and display branches mirroring the other fragment
#     NOTE(review): `visuals = OrderedDict([**data])` is a SyntaxError once
#     un-collapsed — `**` unpacking is not valid inside a list display; it
#     should read `OrderedDict(data)` or `OrderedDict({**data})`.
#   - a trailing `if iter_counter.needs_saving():` whose body is CUT OFF —
#     the fragment is truncated here; restore the missing save logic from the
#     complete original source when unmangling.
# create tool for visualization visualizer = Visualizer(opt) for epoch in iter_counter.training_epochs(): iter_counter.record_epoch_start(epoch) for i, data in enumerate(dataloader, start=iter_counter.epoch_iter): iter_counter.record_one_iter() trainer.g_losses, trainer.d_losses = {}, {} CT, MR = data['CT'].squeeze(1), data['MR'].squeeze(1) if opt.D_steps_per_G: trainer.run_generator_one_step(CT, MR) trainer.run_discriminator_one_step(CT, MR) data = {**data, **trainer.get_latest_generated()} # Visualizations if iter_counter.needs_printing(): losses = trainer.get_latest_losses() visualizer.print_current_errors(epoch, iter_counter.epoch_iter, losses, iter_counter.time_per_iter) visualizer.plot_current_errors( losses, iter_counter.total_steps_so_far) if iter_counter.needs_displaying(): visuals = OrderedDict([**data]) visualizer.display_current_results( visuals, epoch, iter_counter.total_steps_so_far) if iter_counter.needs_saving():