    def __init__(self, opt):
        self.opt = opt
        if 'resnet' in opt.netG:
            from configs.resnet_configs import get_configs
        elif 'spade' in opt.netG:
            from configs.spade_configs import get_configs
        elif 'munit' in opt.netG:
            from configs.munit_configs import get_configs
        else:
            raise NotImplementedError
        self.configs = get_configs(config_name=opt.config_set)

        self.dataloader = create_dataloader(opt)
        model = create_model(opt)
        model.setup(opt)
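        # take the first batch from the dataloader and set it as the model input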
        for data_i in self.dataloader:
            model.set_input(data_i)
            break
        self.model = model
        self.device = model.device
        self.inception_model, self.drn_model, self.deeplabv2_model = create_metric_models(
            opt, self.device)
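        # the real-image Inception statistics are only needed when FID will be computed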
        if self.inception_model is not None:
            self.npz = np.load(opt.real_stat_path)
        self.macs_cache = {}
        self.result_cache = {}

        self.log_file = open(os.path.join(opt.output_dir, 'log.txt'), 'a')
        now = time.strftime('%c')
        self.log_file.write('================ (%s) ================\n' % now)
        self.log_file.flush()
Example 2
    def __init__(self, opt):
        self.opt = opt
        if 'resnet' in opt.netG:
            from configs.resnet_configs import get_configs
        elif 'spade' in opt.netG:
            from configs.spade_configs import get_configs
        else:
            raise NotImplementedError
        self.configs = get_configs(config_name=opt.config_set)

        self.dataloader = create_dataloader(opt)
        model = create_model(opt)
        model.setup(opt)
        for data_i in self.dataloader:
            model.set_input(data_i)
            break
        self.model = model
        self.device = model.device
        self.inception_model, self.drn_model, self.deeplabv2_model = create_metric_models(opt, self.device)
        self.npz = np.load(opt.real_stat_path)
        self.macs_cache = {}
        self.result_cache = {}
Example 3
    def __init__(self, opt):
        assert opt.isTrain
        assert opt.direction == 'AtoB'
        assert opt.dataset_mode == 'unaligned'
        valid_netGs = ['munit', 'mobile_munit']
        assert opt.netG in valid_netGs
        super(MunitModel, self).__init__(opt)
        self.loss_names = ['D_A', 'G_rec_xA', 'G_rec_sA', 'G_rec_cA', 'G_gan_A',
                           'D_B', 'G_rec_xB', 'G_rec_sB', 'G_rec_cB', 'G_gan_B']
        self.visual_names = ['real_A', 'fake_A', 'real_B', 'fake_B']
        self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        self.netG_A = networks.define_G(opt.netG, init_type=opt.init_type,
                                        init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
        self.netG_B = networks.define_G(opt.netG, init_type=opt.init_type,
                                        init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
        self.netD_A = networks.define_D(opt.netD, input_nc=opt.input_nc, init_type='normal',
                                        init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)
        self.netD_B = networks.define_D(opt.netD, input_nc=opt.output_nc, init_type='normal',
                                        init_gain=opt.init_gain, gpu_ids=self.gpu_ids, opt=opt)

        self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
        self.criterionRec = nn.L1Loss()

        self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                            lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)
        self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
                                            lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)
        self.optimizers = [self.optimizer_G, self.optimizer_D]

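        # per-direction evaluation loaders and real-image statistics,
        # so FID can be tracked separately for A->B and B->A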
        self.eval_dataloader_AtoB = create_eval_dataloader(self.opt, direction='AtoB')
        self.eval_dataloader_BtoA = create_eval_dataloader(self.opt, direction='BtoA')
        self.inception_model, _, _ = create_metric_models(opt, self.device)
        self.best_fid_A, self.best_fid_B = 1e9, 1e9
        self.fids_A, self.fids_B = [], []
        self.is_best = False
        self.npz_A = np.load(opt.real_stat_A_path)
        self.npz_B = np.load(opt.real_stat_B_path)
Example 4
    def __init__(self, opt):
        assert opt.isTrain
        valid_netGs = [
            'munit', 'super_munit', 'super_mobile_munit',
            'super_mobile_munit2', 'super_mobile_munit3'
        ]
        assert opt.teacher_netG in valid_netGs and opt.student_netG in valid_netGs
        super(BaseMunitDistiller, self).__init__(opt)
        self.loss_names = [
            'G_gan', 'G_rec_x', 'G_rec_c', 'D_fake', 'D_real'
        ]
        if not opt.student_no_style_encoder:
            self.loss_names.append('G_rec_s')
        self.optimizers = []
        self.image_paths = []
        self.visual_names = ['real_A', 'Sfake_B', 'Tfake_B', 'real_B']
        self.model_names = ['netG_student', 'netG_teacher', 'netD']
        opt_teacher = self.create_option('teacher')
        self.netG_teacher = networks.define_G(opt.teacher_netG,
                                              init_type=opt.init_type,
                                              init_gain=opt.init_gain,
                                              gpu_ids=self.gpu_ids,
                                              opt=opt_teacher)
        opt_student = self.create_option('student')
        self.netG_student = networks.define_G(opt.student_netG,
                                              init_type=opt.init_type,
                                              init_gain=opt.init_gain,
                                              gpu_ids=self.gpu_ids,
                                              opt=opt_student)
        self.netD = networks.define_D(opt.netD,
                                      input_nc=opt.output_nc,
                                      init_type='normal',
                                      init_gain=opt.init_gain,
                                      gpu_ids=self.gpu_ids,
                                      opt=opt)
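        # netA: a 1x1 adaptor mapping student features (4 * student_ngf channels) to the
        # teacher's width; a plain Conv2d for the fixed distiller, a SuperConv2d otherwise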
        if hasattr(opt, 'distiller'):
            self.netA = nn.Conv2d(in_channels=4 * opt.student_ngf,
                                  out_channels=4 * opt.teacher_ngf,
                                  kernel_size=1).to(self.device)
        else:
            self.netA = SuperConv2d(in_channels=4 * opt.student_ngf,
                                    out_channels=4 * opt.teacher_ngf,
                                    kernel_size=1).to(self.device)
        networks.init_net(self.netA)
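        # the teacher stays in eval mode; only the student and the adaptor are optimized below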
        self.netG_teacher.eval()

        self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
        self.criterionRec = torch.nn.L1Loss()

        G_params = []
        G_params.append(self.netG_student.parameters())
        G_params.append(self.netA.parameters())
        self.optimizer_G = torch.optim.Adam(itertools.chain(*G_params),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999),
                                            weight_decay=opt.weight_decay)
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999),
                                            weight_decay=opt.weight_decay)
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader = create_eval_dataloader(self.opt,
                                                      direction=opt.direction)
        self.inception_model, _, _ = create_metric_models(opt,
                                                          device=self.device)
        self.npz = np.load(opt.real_stat_path)
        self.is_best = False
Example 5
        model.test(config)  # run inference
        visuals = model.get_current_visuals()  # get image results
        generated = visuals['fake_B'].cpu()
        fakes.append(generated)
        for path in model.get_image_paths():
            short_path = ntpath.basename(path)
            name = os.path.splitext(short_path)[0]
            names.append(name)
        if i < opt.num_test:
            save_images(webpage, visuals, model.get_image_paths(), opt)
    webpage.save()  # save the HTML
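    # keep the device handle, then release the generator before the metric networks are loaded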
    device = copy.deepcopy(model.device)
    del model
    torch.cuda.empty_cache()

    inception_model, drn_model, deeplabv2_model = create_metric_models(
        opt, device)
    if inception_model is not None:
        npz = np.load(opt.real_stat_path)
        fid = get_fid(fakes, inception_model, npz, device, opt.batch_size)
        print('fid score: %.2f' % fid, flush=True)

    if drn_model is not None:
        mIoU = get_cityscapes_mIoU(fakes,
                                   names,
                                   drn_model,
                                   device,
                                   table_path=opt.table_path,
                                   data_dir=opt.cityscapes_path,
                                   batch_size=opt.batch_size,
                                   num_workers=opt.num_threads)
        print('mIoU: %.2f' % mIoU)
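The fragment above begins inside a test loop whose head is not shown. Below is a minimal sketch of how that loop is typically set up, reusing create_dataloader, create_model and set_input from the earlier examples; the names dataloader, data_i, webpage and config are assumptions and would be created elsewhere in the full script:

    # sketch only: `webpage` (an HTML result writer) and `config` (a generator
    # configuration) are assumed to be prepared earlier and are not defined here
    dataloader = create_dataloader(opt)
    model = create_model(opt)
    model.setup(opt)
    fakes, names = [], []
    for i, data_i in enumerate(dataloader):
        model.set_input(data_i)  # bind the current batch
        # ... the body of the fragment above runs from here, starting at model.test(config)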
Example 6
    def __init__(self, opt):
        assert opt.isTrain
        super(BaseResnetDistiller, self).__init__(opt)
        self.loss_names = ['G_gan', 'G_distill', 'G_recon', 'D_fake', 'D_real']
        self.optimizers = []
        self.image_paths = []
        self.visual_names = ['real_A', 'Sfake_B', 'Tfake_B', 'real_B']
        self.model_names = ['netG_student', 'netG_teacher', 'netD']
        self.netG_teacher = networks.define_G(opt.input_nc,
                                              opt.output_nc,
                                              opt.teacher_ngf,
                                              opt.teacher_netG,
                                              opt.norm,
                                              opt.teacher_dropout_rate,
                                              opt.init_type,
                                              opt.init_gain,
                                              self.gpu_ids,
                                              opt=opt)
        self.netG_student = networks.define_G(opt.input_nc,
                                              opt.output_nc,
                                              opt.student_ngf,
                                              opt.student_netG,
                                              opt.norm,
                                              opt.student_dropout_rate,
                                              opt.init_type,
                                              opt.init_gain,
                                              self.gpu_ids,
                                              opt=opt)

        if getattr(opt, 'sort_channels',
                   False) and opt.restore_student_G_path is not None:
            self.netG_student_tmp = networks.define_G(opt.input_nc,
                                                      opt.output_nc,
                                                      opt.student_ngf,
                                                      opt.student_netG.replace(
                                                          'super_', ''),
                                                      opt.norm,
                                                      opt.student_dropout_rate,
                                                      opt.init_type,
                                                      opt.init_gain,
                                                      self.gpu_ids,
                                                      opt=opt)
        if hasattr(opt, 'distiller'):
            self.netG_pretrained = networks.define_G(opt.input_nc,
                                                     opt.output_nc,
                                                     opt.pretrained_ngf,
                                                     opt.pretrained_netG,
                                                     opt.norm,
                                                     0,
                                                     opt.init_type,
                                                     opt.init_gain,
                                                     self.gpu_ids,
                                                     opt=opt)

        if opt.dataset_mode == 'aligned':
            self.netD = networks.define_D(opt.input_nc + opt.output_nc,
                                          opt.ndf, opt.netD, opt.n_layers_D,
                                          opt.norm, opt.init_type,
                                          opt.init_gain, self.gpu_ids)
        elif opt.dataset_mode == 'unaligned':
            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm,
                                          opt.init_type, opt.init_gain,
                                          self.gpu_ids)
        else:
            raise NotImplementedError('Unknown dataset mode [%s]!!!' %
                                      opt.dataset_mode)

        self.netG_teacher.eval()
        self.criterionGAN = models.modules.loss.GANLoss(opt.gan_mode).to(
            self.device)
        if opt.recon_loss_type == 'l1':
            self.criterionRecon = torch.nn.L1Loss()
        elif opt.recon_loss_type == 'l2':
            self.criterionRecon = torch.nn.MSELoss()
        elif opt.recon_loss_type == 'smooth_l1':
            self.criterionRecon = torch.nn.SmoothL1Loss()
        elif opt.recon_loss_type == 'vgg':
            self.criterionRecon = models.modules.loss.VGGLoss(self.device)
        else:
            raise NotImplementedError(
                'Unknown reconstruction loss type [%s]!' % opt.recon_loss_type)

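        # distill from the intermediate layers model.9, model.12, model.15 and model.18
        # (prefixed with 'module.' when the generator is wrapped in DataParallel)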
        if isinstance(self.netG_teacher, nn.DataParallel):
            self.mapping_layers = [
                'module.model.%d' % i for i in range(9, 21, 3)
            ]
        else:
            self.mapping_layers = ['model.%d' % i for i in range(9, 21, 3)]

        self.netAs = []
        self.Tacts, self.Sacts = {}, {}

        G_params = [self.netG_student.parameters()]
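        # one 1x1 adaptor per mapped layer, aligning student activations (fs * 4 channels)
        # with the teacher's (ft * 4) for the per-layer distillation losses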
        for i, n in enumerate(self.mapping_layers):
            ft, fs = self.opt.teacher_ngf, self.opt.student_ngf
            if hasattr(opt, 'distiller'):
                netA = nn.Conv2d(in_channels=fs * 4, out_channels=ft * 4, kernel_size=1). \
                    to(self.device)
            else:
                netA = SuperConv2d(in_channels=fs * 4, out_channels=ft * 4, kernel_size=1). \
                    to(self.device)
            networks.init_net(netA)
            G_params.append(netA.parameters())
            self.netAs.append(netA)
            self.loss_names.append('G_distill%d' % i)

        self.optimizer_G = torch.optim.Adam(itertools.chain(*G_params),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader = create_eval_dataloader(self.opt,
                                                      direction=opt.direction)
        self.inception_model, self.drn_model, self.deeplabv2_model = create_metric_models(
            opt, device=self.device)
        self.npz = np.load(opt.real_stat_path)
        self.is_best = False