Example #1
import time

from tqdm import tqdm

# CycleGANModel and CycleControllerModel are project-specific classes; they are
# assumed to be imported from the surrounding repository.


def controller_train(opt, cycle_gan: CycleGANModel,
                     cycle_controller: CycleControllerModel, writer_dict):
    writer = writer_dict['writer']

    # controller in train mode
    cycle_controller.train()

    # CycleGAN in eval mode while the controller is updated
    cycle_gan.eval()
    iter_start_time = time.time()
    for i in range(0, opt.ctrl_step):
        controller_step = writer_dict['controller_steps']

        cycle_controller.step_A()
        cycle_controller.step_B()

        if (i + 1) % opt.print_freq_controller == 0:
            losses = cycle_controller.get_current_losses()
            t_comp = (time.time() - iter_start_time)
            iter_start_time = time.time()
            message = "Cont: [Ep: %d/%d]" % (
                i, opt.ctrl_step) + "[{}][{}]".format(cycle_controller.arch_A,
                                                      cycle_controller.arch_B)
            message += "[time: %.3f]" % (t_comp)
            for k, v in losses.items():
                message += '[%s: %.3f]' % (k, v)
            tqdm.write(message)
        # write scalar summaries to TensorBoard
        writer.add_scalars(
            'Controller/loss', {
                "A": cycle_controller.loss_A.item(),
                "B": cycle_controller.loss_B.item()
            }, controller_step)

        writer.add_scalars(
            'Controller/discriminator', {
                "A": cycle_controller.loss_D_A.item(),
                "B": cycle_controller.loss_D_B.item()
            }, controller_step)
        writer.add_scalars(
            'Controller/inception_score', {
                "A": cycle_controller.loss_IS_A.item(),
                "B": cycle_controller.loss_IS_B.item()
            }, controller_step)

        writer.add_scalars('Controller/adv', {
            "A": cycle_controller.loss_adv_A,
            "B": cycle_controller.loss_adv_B
        }, controller_step)
        writer.add_scalars(
            'Controller/entropy', {
                "A": cycle_controller.loss_entropy_A,
                "B": cycle_controller.loss_entropy_B
            }, controller_step)
        writer.add_scalars(
            'Controller/reward', {
                "A": cycle_controller.loss_reward_A,
                "B": cycle_controller.loss_reward_B
            }, controller_step)

        writer_dict['controller_steps'] = controller_step + 1
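A minimal sketch of how controller_train might be invoked from a training script. The SummaryWriter log directory and the already-constructed opt, cycle_gan and cycle_controller objects are assumptions; the writer_dict keys match the ones the function reads above:

from torch.utils.tensorboard import SummaryWriter

# Hypothetical driver code; opt, cycle_gan and cycle_controller are assumed to be
# created elsewhere by the project's option parser and model constructors.
writer_dict = {
    'writer': SummaryWriter(opt.log_dir),  # opt.log_dir is an assumed option
    'controller_steps': 0,                 # global step counter incremented above
}

controller_train(opt, cycle_gan, cycle_controller, writer_dict)
writer_dict['writer'].close()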
Example #2
# This fragment assumes opt has already been parsed and that create_dataset,
# CycleGANModel, create_results_dir and save_images come from the surrounding project.
opt.num_threads = 0  # test code only supports num_threads = 0
opt.batch_size = 1  # test code only supports batch_size = 1
opt.serial_batches = True  # disable data shuffling; comment this line out if results on randomly chosen images are needed
opt.no_flip = True  # no flip; comment this line out if results on flipped images are needed
dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
model = CycleGANModel(opt)  # create a model given opt.model and other options
model.setup(opt)  # regular setup: load and print networks; create schedulers

# create the results directory
image_dir = create_results_dir(opt)

# Test with eval mode. This only affects layers like batchnorm and dropout.
# For CycleGAN it should make no difference, since CycleGAN uses instance norm without dropout.
if opt.eval:
    model.eval()

for i, data in enumerate(dataset):
    if i >= opt.num_test:  # only apply the model to the first opt.num_test images
        break
    model.set_input(data)  # unpack data from the data loader
    model.test()  # run inference
    visuals = model.get_current_visuals()  # get image results
    img_path = model.get_image_paths()  # get image paths
    if i % 5 == 0:  # print progress every 5 images
        print('processing (%04d)-th image... %s' % (i, img_path))
    save_images(opt,
                image_dir,
                visuals,
                img_path,
                aspect_ratio=opt.aspect_ratio,
                width=opt.display_winsize)
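The helper create_results_dir is called above but not shown. A minimal sketch of what it might look like, assuming CycleGAN-style options such as opt.results_dir, opt.name, opt.phase and opt.epoch (these field names are assumptions, not the project's confirmed API):

import os


def create_results_dir(opt):
    # Hypothetical helper: build a folder such as <results_dir>/<name>/<phase>_<epoch>
    # and return its path; the opt fields used here are assumed, not confirmed.
    image_dir = os.path.join(opt.results_dir, opt.name,
                             '%s_%s' % (opt.phase, opt.epoch))
    os.makedirs(image_dir, exist_ok=True)
    return image_dir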