Example No. 1
def get_nets_dis(path, which_epoch='latest'):
    gpu_ids = [0]
    Tensor = torch.cuda.FloatTensor
    opt = util.load_opt(path)
    # not a Caffe-style model
    opt.caffe = False
    netD_A = networks.define_D(opt.output_nc,
                               opt.ndf,
                               opt.which_model_netD,
                               opt.n_layers_D,
                               opt.norm,
                               False,
                               opt.init_type,
                               gpu_ids,
                               opt=opt)
    netD_B = networks.define_D(opt.input_nc,
                               opt.ndf,
                               opt.which_model_netD,
                               opt.n_layers_D,
                               opt.norm,
                               False,
                               opt.init_type,
                               gpu_ids,
                               opt=opt)
    load_network_with_path(netD_A, 'D_A', which_epoch, path)
    load_network_with_path(netD_B, 'D_B', which_epoch, path)
    netD_A.cuda()
    netD_B.cuda()
    return {'A': netD_A, 'B': netD_B}
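A minimal usage sketch for the helper above; the checkpoint directory is hypothetical, and the repository's util/networks modules plus a CUDA device are assumed (the function moves both discriminators to the GPU):

import torch

# hypothetical checkpoint directory written by an earlier training run
nets = get_nets_dis('./checkpoints/horse2zebra', which_epoch='latest')

# score a dummy 3-channel 256x256 batch with one of the loaded discriminators
fake = torch.randn(1, 3, 256, 256, device='cuda')
with torch.no_grad():
    score = nets['A'](fake)
print(score.mean().item())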
Example No. 2
    def initialize_networks(self, opt):
        self.netGA = networks.define_G(opt, opt['netGA'])
        self.netGB = networks.define_G(opt, opt['netGB'])
        self.netDA = networks.define_D(opt, opt['netDA'])
        self.netDB = networks.define_D(opt, opt['netDB'])
        self.netEA, self.netHairA = networks.define_RES(
            opt, opt['input_nc_A'], opt['netEDA'])
        self.netEB, self.netHairB = networks.define_RES(
            opt, opt['input_nc_B'], opt['netEDB'])

        if self.opt['pretrain']:
            self.train_nets = [
                self.netGA, self.netGB, self.netDA, self.netDB, self.netEA,
                self.netHairA, self.netEB, self.netHairB
            ]
        else:
            self.train_nets = [self.netEA, self.netHairA]

        # set require gradients
        if self.isTrain:
            self.set_requires_grad(self.train_nets, True)
        else:
            self.set_requires_grad(self.train_nets, False)

        if self.use_gpu:
            for i in range(len(self.train_nets)):
                self.train_nets[i] = DataParallelWithCallback(
                    self.train_nets[i], device_ids=opt['gpu_ids'])
            if self.opt['pretrain']:
                self.netGA, self.netGB, self.netDA, self.netDB, self.netEA, \
                    self.netHairA, self.netEB, self.netHairB = self.train_nets
            else:
                self.netEA, self.netHairA = self.train_nets
Example No. 3
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        # netD = networks.define_D(opt) if opt.isTrain else None
        if opt.isTrain:
            opt.label_nc = opt.label_nc-1
            netD = networks.define_D(opt)
        else:
            netD = None

        netE = networks.define_E(opt) if opt.use_vae else None
        if opt.isTrain:
            opt.label_nc = (opt.label_nc+1)
            netD_fine = networks.define_D(opt)
        else:
            netD_fine = None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                netD_fine = util.load_network(netD_fine, 'D', opt.which_epoch, opt)
            else:
                netD = None
                netD_fine = None
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)

        return netG, netD, netE, netD_fine
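The method above builds a coarse and a fine discriminator from the same option object by rewriting opt.label_nc in place and restoring it afterwards. A small sketch of a side-effect-free variant that mutates copies instead; the field values are illustrative:

import copy
from argparse import Namespace

opt = Namespace(label_nc=36, isTrain=True)

coarse_opt = copy.deepcopy(opt)
coarse_opt.label_nc -= 1          # the coarse discriminator sees one label fewer
fine_opt = copy.deepcopy(opt)     # the fine discriminator keeps the full label set
print(coarse_opt.label_nc, fine_opt.label_nc)   # 35 36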
Example No. 4
    def initialize_networks(self, opt):

        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netD_rotate = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None
        pretrained_path = ''
        if not opt.isTrain or opt.continue_train:
            self.load_network(netG, 'G', opt.which_epoch, pretrained_path)
            if opt.isTrain and not opt.noload_D:
                self.load_network(netD, 'D', opt.which_epoch, pretrained_path)
                self.load_network(netD_rotate, 'D_rotate', opt.which_epoch,
                                  pretrained_path)
            if opt.use_vae:
                self.load_network(netE, 'E', opt.which_epoch, pretrained_path)
        else:

            if opt.load_separately:
                netG = self.load_separately(netG, 'G', opt)
                if not opt.noload_D:
                    netD = self.load_separately(netD, 'D', opt)
                    netD_rotate = self.load_separately(netD_rotate, 'D_rotate',
                                                       opt)
                if opt.use_vae:
                    netE = self.load_separately(netE, 'E', opt)

        return netG, netD, netE, netD_rotate
Example No. 5
    def initialize_networks(self, opt, end2end=False, triple=False):
        if opt.end2endtri:
            netG_1 = networks.define_G(opt, triple)
            netD_1 = networks.define_D(opt, triple)
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            if opt.end2endtri:
                netG_1 = util.load_network(netG_1, 'G', opt.which_triple_epoch,
                                           opt, triple)
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:  # and end2end:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                if opt.end2endtri:
                    netD_1 = util.load_network(netD_1, 'D',
                                               opt.which_triple_epoch, opt,
                                               triple)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)
        if not opt.end2endtri:
            netG_1 = None
            netD_1 = None

        return netG, netD, netE, netG_1, netD_1
Example No. 6
    def __init__(self, opt, cur_stage):

        BaseModel.__init__(self, opt)

        self.loss_names = ["A", "B",
                           'D_A', 'D_B',
                           'IS_A', 'IS_B',
                           'reward_A', 'reward_B',
                           "adv_A", "adv_B",
                           "entropy_A", "entropy_B"]

        visual_names_A = ['real_A', 'fake_B']
        visual_names_B = ['real_B', 'fake_A']

        self.visual_names = visual_names_A + visual_names_B

        self.cur_stage = cur_stage
        self.model_names = ["C_A", "C_B"]
        self.ctrl_sample_batch = opt.ctrl_sample_batch

        self.netC_A = Controller(opt, self.cur_stage)
        self.netC_B = Controller(opt, self.cur_stage)

        self.netD_A = networks.define_D(3, 64, "basic", norm='instance')
        self.netD_B = networks.define_D(3, 64, "basic", norm='instance')
        load_saves(self.netD_A, "res", "D_A", os.path.join(opt.path, "pre_mod"))
        load_saves(self.netD_B, "res", "D_B", os.path.join(opt.path, "pre_mod"))
        self.loss = networks.GANLoss("lsgan")

        self.prev_hiddens_A = None
        self.prev_archs_A = None

        self.prev_hiddens_B = None
        self.prev_archs_B = None

        if len(self.gpu_ids) != 0:
            self.cuda()

        networks.init_weights(self.netC_A, opt.init_type, opt.init_gain)
        networks.init_weights(self.netC_B, opt.init_type, opt.init_gain)

        self.optimizers_names = ["A", "B"]
        self.optimizerA = torch.optim.Adam(filter(lambda p: p.requires_grad, self.netC_A.parameters()),
                                           opt.ctrl_lr, (0.0, 0.9))
        self.optimizerB = torch.optim.Adam(filter(lambda p: p.requires_grad, self.netC_B.parameters()),
                                           opt.ctrl_lr, (0.0, 0.9))

        self.optimizers.append(self.optimizerA)
        self.optimizers.append(self.optimizerB)

        self.valid_dataloader = create_dataset(opt, valid=True)
        self.baseline_decay = opt.baseline_decay
        self.entropy_coeff = opt.entropy_coeff

        self.netG_A = None
        self.netG_B = None

        self.baseline_A = None
        self.baseline_B = None
Example No. 7
    def __init__(self, opt, device):
        super(CycleGAN, self).__init__()

        self.device = device
        self.opt = opt

        self.netG_A = networks.define_G(self.opt.input_nc, self.opt.output_nc,
                                        self.opt.ngf, self.opt.netG,
                                        self.opt.norm, self.opt.dropout,
                                        self.opt.init_type, self.opt.init_gain,
                                        self.opt.task_num,
                                        self.opt.netG_A_filter_list)
        self.netG_B = networks.define_G(self.opt.input_nc, self.opt.output_nc,
                                        self.opt.ngf, self.opt.netG,
                                        self.opt.norm, self.opt.dropout,
                                        self.opt.init_type, self.opt.init_gain,
                                        self.opt.task_num,
                                        self.opt.netG_B_filter_list)

        if opt.train:
            self.netD_A = networks.define_D(self.opt.input_nc, self.opt.ndf,
                                            self.opt.netD, self.opt.norm,
                                            self.opt.init_type,
                                            self.opt.init_gain)
            self.netD_B = networks.define_D(self.opt.input_nc, self.opt.ndf,
                                            self.opt.netD, self.opt.norm,
                                            self.opt.init_type,
                                            self.opt.init_gain)

            self.fake_A_pool = ImageBuffer(
                self.opt.pool_size
            )  # create image buffer to store previously generated images
            self.fake_B_pool = ImageBuffer(
                self.opt.pool_size
            )  # create image buffer to store previously generated images

            self.criterionGAN = networks.GANLoss(self.opt.gan_mode).to(
                self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=self.opt.lr,
                                                betas=(self.opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=self.opt.lr,
                                                betas=(self.opt.beta1, 0.999))

            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

            self.schedulers = [
                networks.get_scheduler(optimizer, opt)
                for optimizer in self.optimizers
            ]
Example No. 8
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
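The two CycleGAN constructors above pair each discriminator with an image pool/buffer that feeds it previously generated fakes as well as fresh ones. A minimal per-image sketch of such a buffer, assuming the usual 50%-replay behaviour:

import random
import torch

class SimpleImagePool:
    """Keep up to pool_size past fakes; return either the incoming image or,
    with 50% probability, a stored one (swapping the new image in)."""

    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, image):
        if self.pool_size == 0:              # pool disabled: pass through
            return image
        if len(self.images) < self.pool_size:
            self.images.append(image.detach().clone())
            return image
        if random.random() > 0.5:            # replay an old fake, store the new one
            idx = random.randrange(self.pool_size)
            old = self.images[idx]
            self.images[idx] = image.detach().clone()
            return old
        return image

pool = SimpleImagePool(50)
fake_for_D = pool.query(torch.randn(1, 3, 256, 256))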
Example No. 9
    def define_networks(self, start_epoch):
        opt = self.opt        
        # Generator network        
        input_nc = opt.label_nc if (opt.label_nc != 0 and not self.pose) else opt.input_nc
        netG_input_nc = input_nc           
        opt.for_face = False        
        self.netG = networks.define_G(opt)        
        if self.refine_face:            
            opt_face = copy.deepcopy(opt)
            opt_face.n_downsample_G -= 1
            if opt_face.n_adaptive_layers > 0: opt_face.n_adaptive_layers -= 1
            opt_face.input_nc = opt.output_nc
            opt_face.fineSize = self.faceRefiner.face_size
            opt_face.aspect_ratio = 1
            opt_face.for_face = True
            self.netGf = networks.define_G(opt_face)

        # Discriminator network
        if self.isTrain or opt.finetune:            
            netD_input_nc = input_nc + opt.output_nc + (1 if self.concat_fg_mask_for_D else 0)
            if self.concat_ref_for_D:
                netD_input_nc *= 2
            self.netD = networks.define_D(opt, netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm_D, opt.netD_subarch, 
                                          opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)            
            if self.add_face_D:
                self.netDf = networks.define_D(opt, opt.output_nc * 2, opt.ndf, opt.n_layers_D, opt.norm_D, 'n_layers',
                                               1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
            else:
                self.netDf = None
        self.temporal = False
        self.netDT = None             
                    
        print('---------- Networks initialized -------------')

        # initialize optimizers
        if self.isTrain:            
            # optimizer G
            params = list(self.netG.parameters())           
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params, for_discriminator=False)

            # optimizer D            
            params = list(self.netD.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params, for_discriminator=True)           

        print('---------- Optimizers initialized -------------')

        # make model temporal by generating multiple frames
        if (not opt.isTrain or start_epoch > opt.niter_single) and opt.n_frames_G > 1:
            self.make_temporal_model() 
Example No. 10
    def initialize_networks(self, opt):
        netG_for_CT = networks.define_G(opt)
        netD_aligned = networks.define_D(opt) if opt.isTrain else None
        netG_for_MR = networks.define_G(opt)
        netD_unaligned = networks.define_D(opt) if opt.isTrain else None
        
        if not opt.isTrain or opt.continue_train:
            netG_for_CT = util.load_network(netG_for_CT, 'G_for_CT', opt.which_epoch, opt)
            netG_for_MR = util.load_network(netG_for_MR, 'G_for_MR', opt.which_epoch, opt)
            if opt.isTrain:
                netD_aligned = util.load_network(netD_aligned, 'D_aligned', opt.which_epoch, opt)
                netD_unaligned = util.load_network(netD_unaligned, 'D_unaligned', opt.which_epoch, opt)

        return netG_for_CT, netD_aligned, netG_for_MR, netD_unaligned
Example No. 11
    def __init__(self, args):
        self.args = args
        Tensor = torch.cuda.FloatTensor if args.gpu_ids else torch.Tensor
        use_sigmoid = args.no_lsgan

        # Global discriminator

        self.netD = networks.define_D(args.input_nc, args.ndf,
                                      args.which_model_netD, args.n_layers_D,
                                      args.norm, use_sigmoid, args.gpu_ids)

        # Local discriminator

        self.netD_local = networks.define_D(args.input_nc, args.ndf,
                                            args.which_model_netD,
                                            args.n_layers_D, args.norm,
                                            use_sigmoid, args.gpu_ids)

        # Generator

        self.netG = TransformerNet(args.norm, args.affine_state)

        self.gan_loss = networks.GANLoss(use_lsgan=not args.no_lsgan,
                                         tensor=Tensor)

        self.identity_criterion = torch.nn.L1Loss()

        # Resume

        if args.resume_netG != '':
            self.netG.load_state_dict(torch.load(args.resume_netG))
        if args.resume_netD != '':
            self.netD.load_state_dict(torch.load(args.resume_netD))
        if args.resume_netD_local != '':
            self.netD_local.load_state_dict(torch.load(args.resume_netD_local))

        # optimizer

        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=args.lr,
                                            betas=(args.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=args.lr,
                                            betas=(args.beta1, 0.999))
        self.optimizer_D_local = torch.optim.Adam(self.netD_local.parameters(),
                                                  lr=args.lr,
                                                  betas=(args.beta1, 0.999))
        if self.args.cuda:
            self.netD = self.netD.cuda()
            self.netG = self.netG.cuda()
Example No. 12
    def init_temporal_model(self):
        opt = self.opt
        self.temporal = True
        self.netG.init_temporal_network()
        self.netG.cuda()

        if opt.isTrain:
            self.lossCollector.tD = min(opt.n_frames_D, opt.n_frames_G)
            params = list(self.netG.parameters())
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params,
                                                  for_discriminator=False)

            # temporal discriminator
            self.netDT = networks.define_D(opt,
                                           opt.output_nc *
                                           self.lossCollector.tD,
                                           opt.ndf,
                                           opt.n_layers_D,
                                           opt.norm_D,
                                           'n_layers',
                                           1,
                                           not opt.no_ganFeat_loss,
                                           gpu_ids=self.gpu_ids)
            # optimizer D
            params = list(self.netD.parameters()) + list(
                self.netDT.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params,
                                                  for_discriminator=True)

            Visualizer.vis_print(
                self.opt,
                '---------- Now start training multiple frames -------------')
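The temporal discriminator above receives opt.output_nc * tD input channels because it scores tD consecutive output frames stacked along the channel axis. A quick shape illustration with made-up sizes:

import torch

output_nc, tD = 3, 3                                   # illustrative values
frames = [torch.randn(1, output_nc, 256, 256) for _ in range(tD)]
temporal_input = torch.cat(frames, dim=1)              # shape (1, 9, 256, 256)
print(temporal_input.shape)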
Example No. 13
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images, so #channels for D is input_nc + output_nc
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
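As the comment above says, the conditional discriminator takes input_nc + output_nc channels because the input image and the (real or generated) output are concatenated along the channel dimension before being scored. A quick shape check with illustrative sizes:

import torch

real_A = torch.randn(4, 3, 256, 256)             # input images
fake_B = torch.randn(4, 3, 256, 256)             # generated outputs
fake_AB = torch.cat((real_A, fake_B), dim=1)     # 4 x 6 x 256 x 256
print(fake_AB.shape)                             # torch.Size([4, 6, 256, 256])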
Example No. 14
    def make_temporal_model(self):
        opt = self.opt
        self.temporal = True
        self.netG.set_flow_prev()
        self.netG.cuda()

        if opt.isTrain:
            self.lossCollector.tD = min(opt.n_frames_D, opt.n_frames_G)  
            if opt.finetune_all:      
                params = list(self.netG.parameters())
            else:
                train_names = ['flow_network_temp']
                if opt.spade_combine: 
                    train_names += ['img_warp_embedding', 'mlp_gamma3', 'mlp_beta3']
                params, _ = self.get_train_params(self.netG, train_names) 
                    
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params, for_discriminator=False)
            
            # temporal discriminator
            self.netDT = networks.define_D(opt, opt.output_nc * self.lossCollector.tD, opt.ndf, opt.n_layers_D, opt.norm_D, 'n_layers',
                                           1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
            # optimizer D            
            params = list(self.netD.parameters()) + list(self.netDT.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params, for_discriminator=True)           

            print('---------- Now start training multiple frames -------------')
Example No. 15
    def initialize_networks(self, opt):
        net = {}
        net['netG'] = networks.define_G(opt)
        net['netD'] = networks.define_D(opt) if opt.isTrain else None
        net['netCorr'] = networks.define_Corr(opt)
        net['netDomainClassifier'] = networks.define_DomainClassifier(
            opt) if opt.weight_domainC > 0 and opt.domain_rela else None

        if not opt.isTrain or opt.continue_train:
            net['netG'] = util.load_network(net['netG'], 'G', opt.which_epoch,
                                            opt)
            if opt.isTrain:
                net['netD'] = util.load_network(net['netD'], 'D',
                                                opt.which_epoch, opt)
            net['netCorr'] = util.load_network(net['netCorr'], 'Corr',
                                               opt.which_epoch, opt)
            if opt.weight_domainC > 0 and opt.domain_rela:
                net['netDomainClassifier'] = util.load_network(
                    net['netDomainClassifier'], 'DomainClassifier',
                    opt.which_epoch, opt)
            if (not opt.isTrain) and opt.use_ema:
                net['netG'] = util.load_network(net['netG'], 'G_ema',
                                                opt.which_epoch, opt)
                net['netCorr'] = util.load_network(net['netCorr'],
                                                   'netCorr_ema',
                                                   opt.which_epoch, opt)
        return net
Example No. 16
    def __init__(self, args, logger):
        super().__init__(args, logger)
        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['loss_G', 'loss_D']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G', 'D']

        self.sample_names = ['fake_B', 'real_A', 'real_B']
        # load/define networks
        self.G = networks.define_G(args.input_nc, args.output_nc, args.ngf,
                                      args.which_model_netG, args.norm, not args.no_dropout, args.init_type, args.init_gain, self.gpu_ids)

        if 'continue_train' not in args:
            use_sigmoid = args.no_lsgan
            self.D = networks.define_D(args.input_nc + args.output_nc, args.ndf,
                                          args.which_model_netD,
                                          args.n_layers_D, args.norm, use_sigmoid, args.init_type, args.init_gain, self.gpu_ids)

            self.fake_AB_pool = ImagePool(args.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not args.no_lsgan).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.G.parameters(),
                                                lr=args.g_lr, betas=(args.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.D.parameters(),
                                                lr=args.d_lr, betas=(args.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
Example No. 17
    def __init__(self, opt):
        BaseModel.__init__(self, opt)

        self.netP = networks.define_P(opt, weight_path=opt.parse_net_weight)
        self.netG = networks.define_G(opt, use_norm='spectral_norm')

        if self.isTrain:
            self.netD = networks.define_D(opt, opt.Dinput_nc, use_norm='spectral_norm') 
            self.vgg_model = loss.PCPFeat(weight_path='./pretrain_models/vgg19-dcbb9e9d.pth').to(opt.device)
            if len(opt.gpu_ids) > 0:
                self.vgg_model = torch.nn.DataParallel(self.vgg_model, opt.gpu_ids, output_device=opt.device)

        self.model_names = ['G']
        self.loss_names = ['Pix', 'PCP', 'G', 'FM', 'D', 'SS'] # pixel, perceptual, generator, feature-matching, discriminator and semantic-style losses
        self.visual_names = ['img_LR', 'img_HR', 'img_SR', 'ref_Parse', 'hr_mask']
        self.fm_weights = [1**x for x in range(opt.D_num)]

        if self.isTrain:
            self.model_names = ['G', 'D']
            self.load_model_names = ['G', 'D']

            self.criterionParse = torch.nn.CrossEntropyLoss().to(opt.device)
            self.criterionFM = loss.FMLoss().to(opt.device)
            self.criterionGAN = loss.GANLoss(opt.gan_mode).to(opt.device)
            self.criterionPCP = loss.PCPLoss(opt)
            self.criterionPix= nn.L1Loss()
            self.criterionRS = loss.RegionStyleLoss()

            self.optimizer_G = optim.Adam([p for p in self.netG.parameters() if p.requires_grad], lr=opt.g_lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = optim.Adam([p for p in self.netD.parameters() if p.requires_grad], lr=opt.d_lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer_G, self.optimizer_D]
Example No. 18
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        opt.input_nc = 2
        netD1 = networks.define_D(opt) if opt.isTrain else None
        opt.input_nc = 7
        netD2 = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD1 = util.load_network(netD1, 'D1', opt.which_epoch, opt)
                netD2 = util.load_network(netD2, 'D2', opt.which_epoch, opt)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)

        return netG, netD1, netD2, netE
Example No. 19
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netD2 = networks.define_D(
            opt) if opt.isTrain and opt.unpairTrain else None
        netE = networks.define_E(
            opt) if opt.use_vae else None  # this is for original spade network
        netIG = networks.define_IG(
            opt
        ) if opt.use_ig else None  # this is the orient inpainting network
        netSIG = networks.define_SIG(
            opt
        ) if opt.use_stroke else None  # this is the stroke orient inpainting network
        netFE = networks.define_FE(
            opt
        ) if opt.use_instance_feat else None  # this is the feat encoder from pix2pixHD
        netB = networks.define_B(opt) if opt.use_blender else None

        if not opt.isTrain or opt.continue_train:
            # if the pth exist
            save_filename = '%s_net_%s.pth' % (opt.which_epoch, 'G')
            save_dir = os.path.join(opt.checkpoints_dir, opt.name)
            G_path = os.path.join(save_dir, save_filename)
            if os.path.exists(G_path):

                netG = util.load_network(netG, 'G', opt.which_epoch, opt)
                if opt.fix_netG:
                    netG.eval()
                if opt.use_blender:
                    netB = util.load_blend_network(netB, 'B', opt.which_epoch,
                                                   opt)
                if opt.isTrain:
                    netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                    if opt.unpairTrain:
                        netD2 = util.load_network(netD2, 'D', opt.which_epoch,
                                                  opt)
                if opt.use_vae:
                    netE = util.load_network(netE, 'E', opt.which_epoch, opt)
        if opt.use_ig:
            netIG = util.load_inpainting_network(netIG, opt)
            netIG.eval()
        if opt.use_stroke:
            netSIG = util.load_sinpainting_network(netSIG, opt)
            netSIG.eval()

        return netG, netD, netE, netIG, netFE, netB, netD2, netSIG
Example No. 20
def discriminate_sep(disc_model_path, image_a_path, image_b_path):

    # create the discriminator 
    netD = networks.define_D(input_nc=6, 
                             ndf=64, 
                             netD="basic",
                             n_layers_D=3, 
                             norm="batch", 
                             init_type="normal", 
                             init_gain=0.02, 
                             gpu_ids=[0])

    print('loading the model from %s' % disc_model_path)
    device = torch.device('cuda:0')
    state_dict = torch.load(disc_model_path, map_location=str(device))

    if hasattr(state_dict, '_metadata'):
        del state_dict._metadata

    netD.module.load_state_dict(state_dict)

    transform_list = []
    method=Image.BICUBIC

    # bring image to certain size
    load_size = 286
    osize = [load_size, load_size]
    transform_list.append(transforms.Resize(osize, method))

    # crop image to right dimensions
    crop_size = 256
    transform_list.append(transforms.RandomCrop(crop_size))

    # transform image to tensor
    transform_list += [transforms.ToTensor()]
    
    # normalize image
    transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]

    transform_data = transforms.Compose(transform_list)

    img_A_tensor = transform_data(Image.open(image_a_path).convert('RGB'))
    img_B_tensor = transform_data(Image.open(image_b_path).convert('RGB'))

    # adding dimension
    img_A_tensor = img_A_tensor.unsqueeze(0)
    img_B_tensor = img_B_tensor.unsqueeze(0)

    real_A = img_A_tensor.to(device)
    real_B = img_B_tensor.to(device)

    real_AB = torch.cat((real_A, real_B), 1)

    pred = netD(real_AB.detach())

    print("Image {}: discriminator: {}".format(image_a_path, pred.mean()))
Example No. 21
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)

        return netG, netD
Example No. 22
    def initialize(self, opt, net):
        BaseModel.initialize(self, opt)
        self.net = net.to(self.device)
        self.edge_map = EdgeMap(scale=1).to(self.device)

        if self.isTrain:
            # define loss functions
            self.vgg = losses.Vgg19(requires_grad=False).to(self.device)
            self.loss_dic = losses.init_loss(opt, self.Tensor)
            vggloss = losses.ContentLoss()
            vggloss.initialize(losses.VGGLoss(self.vgg))
            self.loss_dic['t_vgg'] = vggloss

            cxloss = losses.ContentLoss()
            if opt.unaligned_loss == 'vgg':
                cxloss.initialize(
                    losses.VGGLoss(self.vgg, weights=[0.1], indices=[31]))
            elif opt.unaligned_loss == 'ctx':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1],
                                  indices=[8, 13, 22]))
            elif opt.unaligned_loss == 'mse':
                cxloss.initialize(nn.MSELoss())
            elif opt.unaligned_loss == 'ctx_vgg':
                cxloss.initialize(
                    losses.CXLoss(self.vgg,
                                  weights=[0.1, 0.1, 0.1, 0.1],
                                  indices=[8, 13, 22, 31],
                                  criterions=[losses.CX_loss] * 3 +
                                  [nn.L1Loss()]))

            else:
                raise NotImplementedError

            self.loss_dic['t_cx'] = cxloss

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.net.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999),
                                                weight_decay=opt.wd)

            self._init_optimizer([self.optimizer_G])

            # define discriminator
            # if self.opt.lambda_gan > 0:
            self.netD = networks.define_D(opt, 3)
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self._init_optimizer([self.optimizer_D])

        if opt.no_verbose is False:
            self.print_network()
Example No. 23
    def initialize_networks(self, opt):
        netG = None
        netD = None
        netE = None
        netV = None
        netA = None
        netA_sync = None
        if opt.train_recognition:
            netV = networks.define_V(opt)
        elif opt.train_sync:
            netA_sync = networks.define_A_sync(opt) if opt.use_audio else None
            netE = networks.define_E(opt)
        else:

            netG = networks.define_G(opt)
            netA = networks.define_A(
                opt) if opt.use_audio and opt.use_audio_id else None
            netA_sync = networks.define_A_sync(opt) if opt.use_audio else None
            netE = networks.define_E(opt)
            netV = networks.define_V(opt)

            if opt.isTrain:
                netD = networks.define_D(opt)

        if not opt.isTrain or opt.continue_train:
            self.load_network(netG, 'G', opt.which_epoch)
            self.load_network(netV, 'V', opt.which_epoch)
            self.load_network(netE, 'E', opt.which_epoch)
            if opt.use_audio:
                if opt.use_audio_id:
                    self.load_network(netA, 'A', opt.which_epoch)
                self.load_network(netA_sync, 'A_sync', opt.which_epoch)

            if opt.isTrain and not opt.noload_D:
                self.load_network(netD, 'D', opt.which_epoch)
                # self.load_network(netD_rotate, 'D_rotate', opt.which_epoch, pretrained_path)

        else:
            if self.opt.pretrain:
                if opt.netE == 'fan':
                    netE.load_pretrain()
                netV.load_pretrain()
            if opt.load_separately:
                netG = self.load_separately(netG, 'G', opt)
                netA = self.load_separately(
                    netA, 'A',
                    opt) if opt.use_audio and opt.use_audio_id else None
                netA_sync = self.load_separately(
                    netA_sync, 'A_sync', opt) if opt.use_audio else None
                netV = self.load_separately(netV, 'V', opt)
                netE = self.load_separately(netE, 'E', opt)
                if not opt.noload_D:
                    netD = self.load_separately(netD, 'D', opt)
        return netG, netD, netA, netA_sync, netV, netE
Example No. 24
def load_checkpoint(opt):
    """Loads the generator and discriminator models from checkpoints.
    """
    use_dropout = not opt.no_dropout
    netG_A = define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.which_model_netG, opt.norm, use_dropout)
    netG_B = define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.which_model_netG, opt.norm, use_dropout)
    use_sigmoid = opt.no_lsgan
    netD_A = define_D(opt.output_nc, opt.ndf, opt.which_model_netD, opt.n_layers_D, opt.norm, use_sigmoid)
    netD_B = define_D(opt.input_nc, opt.ndf, opt.which_model_netD, opt.n_layers_D, opt.norm, use_sigmoid)

    checkpoint_file = "%d_checkpoint_ep%d" % (opt.checkpoint_epoch, opt.checkpoint_epoch)
    checkpoint = torch.load(os.path.join(opt.save_dir,checkpoint_file))

    netG_A.load_state_dict(checkpoint['netG_A'])
    netG_B.load_state_dict(checkpoint['netG_B'])
    netD_A.load_state_dict(checkpoint['netD_A'])
    netD_B.load_state_dict(checkpoint['netD_B'])
    # start_epoch = checkpoint['epoch'] + 1

    return netG_A, netG_B, netD_A, netD_B
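For reference, a sketch of how a checkpoint consumed by load_checkpoint() might be written; it only assumes the '%d_checkpoint_ep%d' naming used above and standard torch.save semantics:

import os
import torch

def save_checkpoint(opt, epoch, netG_A, netG_B, netD_A, netD_B):
    state = {
        'epoch': epoch,
        'netG_A': netG_A.state_dict(),
        'netG_B': netG_B.state_dict(),
        'netD_A': netD_A.state_dict(),
        'netD_B': netD_B.state_dict(),
    }
    filename = "%d_checkpoint_ep%d" % (epoch, epoch)
    torch.save(state, os.path.join(opt.save_dir, filename))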
Example No. 25
    def load_weight(self, pathlist: dict):
        self.net_Gs = []
        self.net_Ds = []
        for weight in pathlist['net_G']:
            net_G = define_G(self.opt).to(self.device)
            net_G.load_state_dict(torch.load(weight, map_location=self.device))
            self.net_Gs.append(net_G)
        for weight in pathlist['net_D']:
            net_D = define_D(self.opt).to(self.device)
            net_D.load_state_dict(torch.load(weight, map_location=self.device))
            self.net_Ds.append(net_D)
Example No. 26
    def __init__(self, opt):
        assert opt.isTrain
        opt = copy.deepcopy(opt)
        if len(opt.gpu_ids) > 0:
            opt.gpu_ids = opt.gpu_ids[:1]
        self.gpu_ids = opt.gpu_ids
        super(SPADEModelModules, self).__init__()
        self.opt = opt
        self.model_names = ['G_student', 'G_teacher', 'D']

        teacher_opt = self.create_option('teacher')
        self.netG_teacher = networks.define_G(opt.teacher_netG,
                                              gpu_ids=self.gpu_ids,
                                              opt=teacher_opt)
        student_opt = self.create_option('student')
        self.netG_student = networks.define_G(opt.student_netG,
                                              init_type=opt.init_type,
                                              init_gain=opt.init_gain,
                                              gpu_ids=self.gpu_ids,
                                              opt=student_opt)
        if hasattr(opt, 'distiller'):
            pretrained_opt = self.create_option('pretrained')
            self.netG_pretrained = networks.define_G(opt.pretrained_netG,
                                                     gpu_ids=self.gpu_ids,
                                                     opt=pretrained_opt)
        self.netD = networks.define_D(opt.netD,
                                      init_type=opt.init_type,
                                      init_gain=opt.init_gain,
                                      gpu_ids=self.gpu_ids,
                                      opt=opt)
        self.mapping_layers = ['head_0', 'G_middle_1', 'up_1']
        self.netAs = nn.ModuleList()
        for i, mapping_layer in enumerate(self.mapping_layers):
            if mapping_layer != 'up_1':
                fs, ft = opt.student_ngf * 16, opt.teacher_ngf * 16
            else:
                fs, ft = opt.student_ngf * 4, opt.teacher_ngf * 4
            if hasattr(opt, 'distiller'):
                netA = nn.Conv2d(in_channels=fs,
                                 out_channels=ft,
                                 kernel_size=1)
            else:
                netA = SuperConv2d(in_channels=fs,
                                   out_channels=ft,
                                   kernel_size=1)
            networks.init_net(netA, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netAs.append(netA)
        self.criterionGAN = GANLoss(opt.gan_mode)
        self.criterionFeat = nn.L1Loss()
        self.criterionVGG = VGGLoss()
        self.optimizers = []
        self.netG_teacher.eval()
        self.config = None
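The netAs modules above are 1x1 convolutions that project a student feature map to the teacher's channel width so the two can be compared by a distillation loss. A standalone sketch with illustrative channel counts:

import torch
import torch.nn as nn

fs, ft = 48, 64                                   # student/teacher widths (illustrative)
netA = nn.Conv2d(in_channels=fs, out_channels=ft, kernel_size=1)
student_feat = torch.randn(2, fs, 32, 32)
teacher_feat = torch.randn(2, ft, 32, 32)
distill_loss = nn.L1Loss()(netA(student_feat), teacher_feat)
print(distill_loss.item())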
Example No. 27
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        if opt.isTrain:
            netD = networks.define_D(opt)
            netD_uncond = networks.define_D(opt, True)
        else:
            netD = None
            netD_uncond = None

        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                netD_uncond = util.load_network(netD_uncond, 'D_uncond',
                                                opt.which_epoch, opt)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)

        return netG, netD, netD_uncond, netE
Example No. 28
    def initialize_networks(self, opt, load_weights=True):
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if (not opt.isTrain or opt.continue_train) and load_weights:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)

        return netG, netD, netE
Example No. 29
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, "G", opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, "D", opt.which_epoch, opt)
            if opt.use_vae:
                netE = util.load_network(netE, "E", opt.which_epoch, opt)

        return netG, netD, netE
Example No. 30
    def initialize_networks(self, opt):

        netG2 = networks.define_G(opt)
        netD2 = networks.define_D(opt) if opt.isTrain else None
        netE2 = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            netG2 = util.load_network2(netG2, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD2 = util.load_network2(netD2, 'D', opt.which_epoch, opt)
            if opt.use_vae:
                netE2 = util.load_network2(netE2, 'E', opt.which_epoch, opt)
        elif opt.use_vae and opt.pretrain_vae:
            netE2 = util.load_network2(netE2, 'E', opt.which_epoch, opt)

        if opt.edge_cat:
            opt.label_nc -= 1
            opt.semantic_nc -= 1

        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)
        elif opt.use_vae and opt.pretrain_vae:
            netE = util.load_network(netE, 'E', opt.which_epoch, opt)
            print('Load fixed netE.')

        if opt.edge_cat:
            opt.label_nc += 1
            opt.semantic_nc += 1

        return netG, netD, netE, netG2, netD2, netE2
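Most examples above delegate weight loading to util.load_network(net, label, epoch, opt). Its behaviour varies by repository, but the '%s_net_%s.pth' filename visible in Example No. 19 suggests roughly the following; a minimal sketch under that assumption:

import os
import torch

def load_network_sketch(net, label, epoch, opt):
    # assumed layout: <checkpoints_dir>/<name>/<epoch>_net_<label>.pth
    path = os.path.join(opt.checkpoints_dir, opt.name, '%s_net_%s.pth' % (epoch, label))
    state_dict = torch.load(path, map_location='cpu')
    net.load_state_dict(state_dict)
    return net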