Example #1
    def initialize_networks(self, opt, end2end=False, triple=False):
        if opt.end2endtri:
            netG_1 = networks.define_G(opt, triple)
            netD_1 = networks.define_D(opt, triple)
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None

        if not opt.isTrain or opt.continue_train:
            if opt.end2endtri:
                netG_1 = util.load_network(netG_1, 'G', opt.which_triple_epoch,
                                           opt, triple)
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:  # and end2end:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                if opt.end2endtri:
                    netD_1 = util.load_network(netD_1, 'D',
                                               opt.which_triple_epoch, opt,
                                               triple)
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)
        if not opt.end2endtri:
            netG_1 = None
            netD_1 = None

        return netG, netD, netE, netG_1, netD_1
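A minimal caller sketch, assuming this method lives on a SPADE-style model class (the attribute assignments are illustrative, not from the source):

    # hypothetical caller inside the owning model's __init__
    self.netG, self.netD, self.netE, self.netG_1, self.netD_1 = \
        self.initialize_networks(opt)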
Example #2
def get_nets(path, which_epoch='latest', def_opt=None):
    gpu_ids = [0]
    Tensor = torch.cuda.FloatTensor
    opt = util.load_opt(path, def_opt)
    # assume caffe style model
    opt.not_caffe = False
    netG_A = networks.define_G(opt.input_nc,
                               opt.output_nc,
                               opt.ngf,
                               opt.which_model_netG,
                               opt.norm,
                               not opt.no_dropout,
                               opt.init_type,
                               gpu_ids,
                               opt=opt)
    util.load_network_with_path(netG_A, 'G_A', path)

    netG_B = networks.define_G(opt.input_nc,
                               opt.output_nc,
                               opt.ngf,
                               opt.which_model_netG,
                               opt.norm,
                               not opt.no_dropout,
                               opt.init_type,
                               gpu_ids,
                               opt=opt)
    util.load_network_with_path(netG_B, 'G_B', path)
    netG_A.cuda()
    netG_B.cuda()
    return {'A': netG_A, 'B': netG_B}
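A hedged usage sketch for get_nets; the checkpoint directory and input shape are illustrative assumptions, not from the source:

    import torch

    nets = get_nets('./checkpoints/example_run')    # hypothetical path
    with torch.no_grad():
        dummy = torch.randn(1, 3, 256, 256).cuda()  # channel count depends on opt.input_nc
        fake_B = nets['A'](dummy)                   # translate domain A -> B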
Example #3
    def initialize_networks(self, opt):
        self.netGA = networks.define_G(opt, opt['netGA'])
        self.netGB = networks.define_G(opt, opt['netGB'])
        self.netDA = networks.define_D(opt, opt['netDA'])
        self.netDB = networks.define_D(opt, opt['netDB'])
        self.netEA, self.netHairA = networks.define_RES(
            opt, opt['input_nc_A'], opt['netEDA'])
        self.netEB, self.netHairB = networks.define_RES(
            opt, opt['input_nc_B'], opt['netEDB'])

        if self.opt['pretrain']:
            self.train_nets = [
                self.netGA, self.netGB, self.netDA, self.netDB, self.netEA,
                self.netHairA, self.netEB, self.netHairB
            ]
        else:
            self.train_nets = [self.netEA, self.netHairA]

        # set require gradients
        if self.isTrain:
            self.set_requires_grad(self.train_nets, True)
        else:
            self.set_requires_grad(self.train_nets, False)

        if self.use_gpu:
            for i in range(len(self.train_nets)):
                self.train_nets[i] = DataParallelWithCallback(
                    self.train_nets[i], device_ids=opt['gpu_ids'])
            if self.opt['pretrain']:
                self.netGA, self.netGB, self.netDA, self.netDB, self.netEA, \
                    self.netHairA, self.netEB, self.netHairB = self.train_nets
            else:
                self.netEA, self.netHairA = self.train_nets
Example #4
    def __init__(self, opt, device):
        super(CycleGAN, self).__init__()

        self.device = device
        self.opt = opt

        self.netG_A = networks.define_G(self.opt.input_nc, self.opt.output_nc,
                                        self.opt.ngf, self.opt.netG,
                                        self.opt.norm, self.opt.dropout,
                                        self.opt.init_type, self.opt.init_gain,
                                        self.opt.task_num,
                                        self.opt.netG_A_filter_list)
        self.netG_B = networks.define_G(self.opt.input_nc, self.opt.output_nc,
                                        self.opt.ngf, self.opt.netG,
                                        self.opt.norm, self.opt.dropout,
                                        self.opt.init_type, self.opt.init_gain,
                                        self.opt.task_num,
                                        self.opt.netG_B_filter_list)

        if opt.train:
            self.netD_A = networks.define_D(self.opt.input_nc, self.opt.ndf,
                                            self.opt.netD, self.opt.norm,
                                            self.opt.init_type,
                                            self.opt.init_gain)
            self.netD_B = networks.define_D(self.opt.input_nc, self.opt.ndf,
                                            self.opt.netD, self.opt.norm,
                                            self.opt.init_type,
                                            self.opt.init_gain)

            self.fake_A_pool = ImageBuffer(
                self.opt.pool_size
            )  # create image buffer to store previously generated images
            self.fake_B_pool = ImageBuffer(
                self.opt.pool_size
            )  # create image buffer to store previously generated images

            self.criterionGAN = networks.GANLoss(self.opt.gan_mode).to(
                self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=self.opt.lr,
                                                betas=(self.opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=self.opt.lr,
                                                betas=(self.opt.beta1, 0.999))

            self.optimizers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

            self.schedulers = [
                networks.get_scheduler(optimizer, opt)
                for optimizer in self.optimizers
            ]
Example #5
def get_stack_nets(path, which_epoch='latest'):
    gpu_ids = [0]
    Tensor = torch.cuda.FloatTensor
    opt = util.load_opt(path)
    netG_A = networks.define_G(opt.input_nc,
                               opt.output_nc,
                               opt.ngf,
                               opt.which_model_netG,
                               opt.norm,
                               False,
                               opt.init_type,
                               gpu_ids,
                               n_upsampling=3,
                               opt=opt)
    netG_B = networks.define_G(opt.output_nc,
                               opt.input_nc,
                               opt.ngf,
                               opt.which_model_netG,
                               opt.norm,
                               False,
                               opt.init_type,
                               gpu_ids,
                               n_upsampling=3,
                               opt=opt)
    netG_A_pre = networks.define_G(opt.input_nc,
                                   opt.output_nc,
                                   opt.ngf,
                                   opt.which_model_netG,
                                   opt.norm,
                                   False,
                                   opt.init_type,
                                   gpu_ids,
                                   n_downsampling=3,
                                   opt=opt)
    netG_B_pre = networks.define_G(opt.output_nc,
                                   opt.input_nc,
                                   opt.ngf,
                                   opt.which_model_netG,
                                   opt.norm,
                                   False,
                                   opt.init_type,
                                   gpu_ids,
                                   n_downsampling=3,
                                   opt=opt)

    load_network_with_path(netG_A, 'G_A', which_epoch, path)
    load_network_with_path(netG_B, 'G_B', which_epoch, path)
    load_network_with_path(netG_A_pre, 'G_A_pre', which_epoch, path)
    load_network_with_path(netG_B_pre, 'G_B_pre', which_epoch, path)

    netG_A.cuda()
    netG_B.cuda()
    netG_A_pre.cuda()
    netG_B_pre.cuda()

    return {'A': netG_A, 'B': netG_B, 'A_pre': netG_A_pre, 'B_pre': netG_B_pre}
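A hedged usage sketch for get_stack_nets; the path is a placeholder, and how the two stages compose depends on the surrounding pipeline:

    stack = get_stack_nets('./checkpoints/stack_run', which_epoch='latest')
    # 'A_pre'/'B_pre' are the first-stage generators (n_downsampling=3);
    # 'A'/'B' are the second-stage refiners (n_upsampling=3).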
Example #6
    def __init__(self, opt):
        assert opt.isTrain
        opt = copy.deepcopy(opt)
        if len(opt.gpu_ids) > 0:
            opt.gpu_ids = opt.gpu_ids[:1]
        self.gpu_ids = opt.gpu_ids
        super(SPADEModelModules, self).__init__()
        self.opt = opt
        self.model_names = ['G_student', 'G_teacher', 'D']

        teacher_opt = self.create_option('teacher')
        self.netG_teacher = networks.define_G(opt.teacher_netG,
                                              gpu_ids=self.gpu_ids,
                                              opt=teacher_opt)
        student_opt = self.create_option('student')
        self.netG_student = networks.define_G(opt.student_netG,
                                              init_type=opt.init_type,
                                              init_gain=opt.init_gain,
                                              gpu_ids=self.gpu_ids,
                                              opt=student_opt)
        if hasattr(opt, 'distiller'):
            pretrained_opt = self.create_option('pretrained')
            self.netG_pretrained = networks.define_G(opt.pretrained_netG,
                                                     gpu_ids=self.gpu_ids,
                                                     opt=pretrained_opt)
        self.netD = networks.define_D(opt.netD,
                                      init_type=opt.init_type,
                                      init_gain=opt.init_gain,
                                      gpu_ids=self.gpu_ids,
                                      opt=opt)
        self.mapping_layers = ['head_0', 'G_middle_1', 'up_1']
        self.netAs = nn.ModuleList()
        for i, mapping_layer in enumerate(self.mapping_layers):
            if mapping_layer != 'up_1':
                fs, ft = opt.student_ngf * 16, opt.teacher_ngf * 16
            else:
                fs, ft = opt.student_ngf * 4, opt.teacher_ngf * 4
            if hasattr(opt, 'distiller'):
                netA = nn.Conv2d(in_channels=fs,
                                 out_channels=ft,
                                 kernel_size=1)
            else:
                netA = SuperConv2d(in_channels=fs,
                                   out_channels=ft,
                                   kernel_size=1)
            networks.init_net(netA, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netAs.append(netA)
        self.criterionGAN = GANLoss(opt.gan_mode)
        self.criterionFeat = nn.L1Loss()
        self.criterionVGG = VGGLoss()
        self.optimizers = []
        self.netG_teacher.eval()
        self.config = None
Example #7
    def define_networks(self, start_epoch):
        opt = self.opt        
        # Generator network        
        input_nc = opt.label_nc if (opt.label_nc != 0 and not self.pose) else opt.input_nc
        netG_input_nc = input_nc           
        opt.for_face = False        
        self.netG = networks.define_G(opt)        
        if self.refine_face:            
            opt_face = copy.deepcopy(opt)
            opt_face.n_downsample_G -= 1
            if opt_face.n_adaptive_layers > 0: opt_face.n_adaptive_layers -= 1
            opt_face.input_nc = opt.output_nc
            opt_face.fineSize = self.faceRefiner.face_size
            opt_face.aspect_ratio = 1
            opt_face.for_face = True
            self.netGf = networks.define_G(opt_face)

        # Discriminator network
        if self.isTrain or opt.finetune:            
            netD_input_nc = input_nc + opt.output_nc + (1 if self.concat_fg_mask_for_D else 0)
            if self.concat_ref_for_D:
                netD_input_nc *= 2
            self.netD = networks.define_D(opt, netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm_D, opt.netD_subarch, 
                                          opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)            
            if self.add_face_D:
                self.netDf = networks.define_D(opt, opt.output_nc * 2, opt.ndf, opt.n_layers_D, opt.norm_D, 'n_layers',
                                               1, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
            else:
                self.netDf = None
        self.temporal = False
        self.netDT = None             
                    
        print('---------- Networks initialized -------------')

        # initialize optimizers
        if self.isTrain:            
            # optimizer G
            params = list(self.netG.parameters())           
            if self.refine_face: params += list(self.netGf.parameters())
            self.optimizer_G = self.get_optimizer(params, for_discriminator=False)

            # optimizer D            
            params = list(self.netD.parameters())
            if self.add_face_D: params += list(self.netDf.parameters())
            self.optimizer_D = self.get_optimizer(params, for_discriminator=True)           

        print('---------- Optimizers initialized -------------')

        # make model temporal by generating multiple frames
        if (not opt.isTrain or start_epoch > opt.niter_single) and opt.n_frames_G > 1:
            self.make_temporal_model() 
Example #8
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
Example #9
    def initialize_networks(self, opt):
        netG_for_CT = networks.define_G(opt)
        netD_aligned = networks.define_D(opt) if opt.isTrain else None
        netG_for_MR = networks.define_G(opt)
        netD_unaligned = networks.define_D(opt) if opt.isTrain else None
        
        if not opt.isTrain or opt.continue_train:
            netG_for_CT = util.load_network(netG_for_CT, 'G_for_CT', opt.which_epoch, opt)
            netG_for_MR = util.load_network(netG_for_MR, 'G_for_MR', opt.which_epoch, opt)
            if opt.isTrain:
                netD_aligned = util.load_network(netD_aligned, 'D_aligned', opt.which_epoch, opt)
                netD_unaligned = util.load_network(netD_unaligned, 'D_unaligned', opt.which_epoch, opt)

        return netG_for_CT, netD_aligned, netG_for_MR, netD_unaligned
Example #10
    def __init__(self, opt):
        super(HyperRIMModel, self).__init__(opt)
        train_opt = opt['train']

        # define networks and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        if self.is_train:
            self.netG.train()
        self.load()
        # store the number of levels and code channel
        self.num_levels = int(math.log(opt['scale'], 2))
        self.code_nc = opt['network_G']['code_nc']
        self.map_nc = opt['network_G']['map_nc']

        # define losses, optimizer and scheduler
        self.netF = networks.define_F(opt).to(self.device)
        self.projections = None
        if self.is_train:
            # G
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
            map_network_params = []
            core_network_params = []
            # can freeze weights for any of the levels
            freeze_level = train_opt['freeze_level']
            for k, v in self.netG.named_parameters():
                if v.requires_grad:
                    if freeze_level:
                        if "level_%d" % freeze_level not in k:
                            if 'map' in k:
                                map_network_params.append(v)
                            else:
                                core_network_params.append(v)
                    else:
                        if 'map' in k:
                            map_network_params.append(v)
                        else:
                            core_network_params.append(v)
                else:
                    print('WARNING: params [{:s}] will not optimize.'.format(k))
            self.optimizer_G = torch.optim.Adam([{'params': core_network_params},
                                                 {'params': map_network_params, 'lr': 1e-2 * train_opt['lr_G']}],
                                                lr=train_opt['lr_G'], weight_decay=wd_G,
                                                betas=(train_opt['beta1_G'], 0.999))
            self.optimizers.append(self.optimizer_G)
            # for resume training - load the previous optimizer stats
            self.load_optimizer()

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, train_opt['lr_steps'],
                                                                    train_opt['lr_gamma']))
            else:
                raise NotImplementedError('MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()

        print('---------- Model initialized ------------------')
        self.print_network()
        print('-----------------------------------------------')
Example #11
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        # netD = networks.define_D(opt) if opt.isTrain else None
        if opt.isTrain:
            opt.label_nc = opt.label_nc - 1
            netD = networks.define_D(opt)
        else:
            netD = None

        netE = networks.define_E(opt) if opt.use_vae else None
        if opt.isTrain:
            opt.label_nc = opt.label_nc + 1
            netD_fine = networks.define_D(opt)
        else:
            netD_fine = None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)
                netD_fine = util.load_network(netD_fine, 'D', opt.which_epoch, opt)
            else:
                netD = None
                netD_fine = None
            if opt.use_vae:
                netE = util.load_network(netE, 'E', opt.which_epoch, opt)

        return netG, netD, netE, netD_fine
Example #12
    def __init__(self, opt):
        super(SRModel, self).__init__(opt)
        train_opt = opt['train']

        # define network and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        self.load()

        if self.is_train:
            self.netG.train()

            # loss
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss().to(self.device)
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss().to(self.device)
            else:
                raise NotImplementedError('Loss type [{:s}] is not recognized.'.format(loss_type))
            self.l_pix_w = train_opt['pixel_weight']

            # G feature loss
            if 'feature_weight' in train_opt and train_opt['feature_weight'] > 0:
                l_fea_type = train_opt['feature_criterion']
                if l_fea_type == 'l1':
                    self.cri_fea = nn.L1Loss().to(self.device)
                elif l_fea_type == 'l2':
                    self.cri_fea = nn.MSELoss().to(self.device)
                else:
                    raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))
                self.l_fea_w = train_opt['feature_weight']
            else:
                logger.info('Remove feature loss.')
                self.cri_fea = None
            if self.cri_fea:  # load VGG perceptual loss
                self.netF = networks.define_F(opt, use_bn=False).to(self.device)

            # optimizers
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
            optim_params = []
            for k, v in self.netG.named_parameters():  # can optimize for a part of the model
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    logger.warning('Params [{:s}] will not optimize.'.format(k))
            self.optimizer_G = torch.optim.Adam(
                optim_params, lr=train_opt['lr_G'], weight_decay=wd_G)
            self.optimizers.append(self.optimizer_G)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError('MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()
        # print network
        self.print_network()
Example #13
def main():
    opt = {
         'gpu_ids': [0],
         'model': 'NLCSNet',
         'network_G': {'which_model_G': 'NLCSGen_ResNet',
          'is_train': True,
          'norm_type': None,
          'act_type': 'leakyrelu',
          'mode': 'CNA',
          'k': 6,
          'in_nc': 1,
          'out_nc': 1,
          'nf': 16,
          'nb': 5,
          'upscale': 2,
          'ksize_enc': 8,
          'patch_size': 4,
          'patch_stride': 4,
          'group': 1,
          'gc': 32,
          'upsample_mode': 'pixelshuffle',
          'fusion': 'cat',
          'enc_enable': False},
         'train': {'lr_G': 0.0001,
          'weight_decay_G': 0,
          'beta1_G': 0.9,
          'lr_scheme': 'MultiStepLR',
          'lr_steps': [20000.0, 50000.0],
          'lr_gamma': 0.5,
          'pixel_criterion': 'l2',
          'pixel_weight': 1,
          'feature_criterion': 'l2',
          'feature_weight': 0,
          'manual_seed': 0,
          'niter': 100000.0,
          'val_freq': 8,
          'lr_decay_iter': 10},
         }

    opt['network_G']['enc_enable'] = True
    device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')

    x = torch.rand([16, 1, 128, 128], requires_grad=True).double().to(device)
    z = torch.rand([16, 1, 32, 32], requires_grad=True).double().to(device)

    # model1 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=4).double().to(device)
    # y = model1(x)
    # cri_pix = nn.MSELoss().to(device)
    # l_g_pix = cri_pix(y, z)
    # l_g_pix.backward()
    # for p in model1.parameters():
    #     print(p.grad)

    model = networks.define_G(opt).double().to(device)
    y = model(x)
    cri_pix = nn.MSELoss().to(device)
    l_g_pix = cri_pix(y, z)
    l_g_pix.backward()
    for p in model.parameters():
        print(p.grad)
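A hedged note on the block above: casting the model and inputs to double precision and backpropagating an MSE loss is a quick end-to-end gradient check; every parameter should then carry a non-None gradient. An equivalent assertion (illustrative):

    assert all(p.grad is not None for p in model.parameters())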
Example #14
    def initialize_networks(self, opt):
        net = {}
        net['netG'] = networks.define_G(opt)
        net['netD'] = networks.define_D(opt) if opt.isTrain else None
        net['netCorr'] = networks.define_Corr(opt)
        net['netDomainClassifier'] = networks.define_DomainClassifier(
            opt) if opt.weight_domainC > 0 and opt.domain_rela else None

        if not opt.isTrain or opt.continue_train:
            net['netG'] = util.load_network(net['netG'], 'G', opt.which_epoch,
                                            opt)
            if opt.isTrain:
                net['netD'] = util.load_network(net['netD'], 'D',
                                                opt.which_epoch, opt)
            net['netCorr'] = util.load_network(net['netCorr'], 'Corr',
                                               opt.which_epoch, opt)
            if opt.weight_domainC > 0 and opt.domain_rela:
                net['netDomainClassifier'] = util.load_network(
                    net['netDomainClassifier'], 'DomainClassifier',
                    opt.which_epoch, opt)
            if (not opt.isTrain) and opt.use_ema:
                net['netG'] = util.load_network(net['netG'], 'G_ema',
                                                opt.which_epoch, opt)
                net['netCorr'] = util.load_network(net['netCorr'],
                                                   'netCorr_ema',
                                                   opt.which_epoch, opt)
        return net
Example #15
    def __init__(self, args, logger):
        super().__init__(args, logger)
        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        self.loss_names = ['loss_G', 'loss_D']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G', 'D']

        self.sample_names = ['fake_B', 'real_A', 'real_B']
        # load/define networks
        self.G = networks.define_G(args.input_nc, args.output_nc, args.ngf,
                                      args.which_model_netG, args.norm, not args.no_dropout, args.init_type, args.init_gain, self.gpu_ids)

        if 'continue_train' not in args:
            use_sigmoid = args.no_lsgan
            self.D = networks.define_D(args.input_nc + args.output_nc, args.ndf,
                                          args.which_model_netD,
                                          args.n_layers_D, args.norm, use_sigmoid, args.init_type, args.init_gain, self.gpu_ids)

            self.fake_AB_pool = ImagePool(args.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not args.no_lsgan).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.G.parameters(),
                                                lr=args.g_lr, betas=(args.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.D.parameters(),
                                                lr=args.d_lr, betas=(args.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
Example #16
    def initialize_networks(self, opt):

        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None
        netD_rotate = networks.define_D(opt) if opt.isTrain else None
        netE = networks.define_E(opt) if opt.use_vae else None
        pretrained_path = ''
        if not opt.isTrain or opt.continue_train:
            self.load_network(netG, 'G', opt.which_epoch, pretrained_path)
            if opt.isTrain and not opt.noload_D:
                self.load_network(netD, 'D', opt.which_epoch, pretrained_path)
                self.load_network(netD_rotate, 'D_rotate', opt.which_epoch,
                                  pretrained_path)
            if opt.use_vae:
                self.load_network(netE, 'E', opt.which_epoch, pretrained_path)
        else:
            if opt.load_separately:
                netG = self.load_separately(netG, 'G', opt)
                if not opt.noload_D:
                    netD = self.load_separately(netD, 'D', opt)
                    netD_rotate = self.load_separately(netD_rotate, 'D_rotate',
                                                       opt)
                if opt.use_vae:
                    netE = self.load_separately(netE, 'E', opt)

        return netG, netD, netE, netD_rotate
Example #17
    def __init__(self, opt):
        super(TestModel, self).__init__(opt)
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.model_names = ['G']
        self.netG = networks.define_G(opt.netG, input_nc=opt.input_nc, output_nc=opt.output_nc, ngf=opt.ngf,
                                      norm=opt.norm, dropout_rate=opt.dropout_rate, gpu_ids=self.gpu_ids, opt=opt)
        self.netG.eval()
Example #18
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
Example #19
    def __init__(self, opt):
        BaseModel.__init__(self, opt)

        self.netP = networks.define_P(opt, weight_path=opt.parse_net_weight)
        self.netG = networks.define_G(opt, use_norm='spectral_norm')

        if self.isTrain:
            self.netD = networks.define_D(opt, opt.Dinput_nc, use_norm='spectral_norm') 
            self.vgg_model = loss.PCPFeat(weight_path='./pretrain_models/vgg19-dcbb9e9d.pth').to(opt.device)
            if len(opt.gpu_ids) > 0:
                self.vgg_model = torch.nn.DataParallel(self.vgg_model, opt.gpu_ids, output_device=opt.device)

        self.model_names = ['G']
        self.loss_names = ['Pix', 'PCP', 'G', 'FM', 'D', 'SS'] # Generator loss, fm loss, parsing loss, discriminator loss
        self.visual_names = ['img_LR', 'img_HR', 'img_SR', 'ref_Parse', 'hr_mask']
        self.fm_weights = [1**x for x in range(opt.D_num)]

        if self.isTrain:
            self.model_names = ['G', 'D']
            self.load_model_names = ['G', 'D']

            self.criterionParse = torch.nn.CrossEntropyLoss().to(opt.device)
            self.criterionFM = loss.FMLoss().to(opt.device)
            self.criterionGAN = loss.GANLoss(opt.gan_mode).to(opt.device)
            self.criterionPCP = loss.PCPLoss(opt)
            self.criterionPix = nn.L1Loss()
            self.criterionRS = loss.RegionStyleLoss()

            self.optimizer_G = optim.Adam([p for p in self.netG.parameters() if p.requires_grad], lr=opt.g_lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = optim.Adam([p for p in self.netD.parameters() if p.requires_grad], lr=opt.d_lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer_G, self.optimizer_D]
Example #20
    def __init__(self, opt):
        super(SRModel, self).__init__(opt)
        train_opt = opt['train']

        # define network and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        self.load()

        if self.is_train:
            self.netG.train()

            # loss
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss().to(self.device)
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss().to(self.device)
            else:
                raise NotImplementedError(
                    'Loss type [{:s}] is not recognized.'.format(loss_type))
            self.l_pix_w = train_opt['pixel_weight']

            # optimizers
            wd_G = train_opt['weight_decay_G'] if train_opt[
                'weight_decay_G'] else 0

            # find the parameters to optimize
            if opt['finetune_norm']:
                optim_params = []
                for k, v in self.netG.named_parameters():
                    v.requires_grad = False
                    if k.find('transformer') >= 0:
                        v.requires_grad = True
                        v.data.zero_()
                        optim_params.append(v)
                        logger.info(
                            'Params [{:s}] initialized to 0 and will optimize.'
                            .format(k))
            else:
                optim_params = list(self.netG.parameters())

            self.optimizer_G = torch.optim.Adam(optim_params,
                                                lr=train_opt['lr_G'],
                                                weight_decay=wd_G)
            self.optimizers.append(self.optimizer_G)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError(
                    'MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()
        # print network
        self.print_network()
Example #21
    def __init__(self, opt):
        super(SRModel, self).__init__(opt)
        train_opt = opt['train']
        self.input_L = self.Tensor()
        self.input_H = self.Tensor()

        # define network and load pretrained models
        self.netG = networks.define_G(opt)
        self.load()

        if self.is_train:
            self.netG.train()

            # loss
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss()
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss()
            else:
                raise NotImplementedError('Loss type [%s] is not recognized.' %
                                          loss_type)
            if self.use_gpu:
                self.cri_pix.cuda()
            self.l_pix_w = train_opt['pixel_weight']

            # optimizers
            self.optimizers = []
            wd_G = train_opt['weight_decay_G'] if train_opt[
                'weight_decay_G'] else 0
            optim_params = []
            for k, v in self.netG.named_parameters(
            ):  # can optimize for a part of the model
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    print('WARNING: params [%s] will not optimize.' % k)
            self.optimizer_G = torch.optim.Adam(optim_params,
                                                lr=train_opt['lr_G'],
                                                weight_decay=wd_G)
            self.optimizers.append(self.optimizer_G)

            # schedulers
            self.schedulers = []
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError(
                    'MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()

        print('---------- Model initialized ------------------')
        self.print_network()
        print('-----------------------------------------------')
Example #22
    def __init__(self, opt):
        super(SRModel, self).__init__(opt)
        train_opt = opt['train']
        self.chop = opt['chop']
        self.scale = opt['scale']
        self.val_lpips = opt['val_lpips']
        # define network and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        self.load()

        if self.is_train:
            self.netG.train()

            # loss
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss().to(self.device)
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss().to(self.device)
            else:
                raise NotImplementedError(
                    'Loss type [{:s}] is not recognized.'.format(loss_type))
            self.l_pix_w = train_opt['pixel_weight']

            # optimizers
            wd_G = train_opt['weight_decay_G'] if train_opt[
                'weight_decay_G'] else 0
            optim_params = []
            for k, v in self.netG.named_parameters(
            ):  # can optimize for a part of the model
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    logger.warning(
                        'Params [{:s}] will not optimize.'.format(k))
            self.optimizer_G = torch.optim.Adam(optim_params,
                                                lr=train_opt['lr_G'],
                                                weight_decay=wd_G)
            self.optimizers.append(self.optimizer_G)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError(
                    'MultiStepLR learning rate scheme is enough.')

            self.log_dict = OrderedDict()
        # print network
        self.print_network()

        if self.val_lpips:
            self.cri_fea_lpips = PerceptualLoss(model='net-lin',
                                                net='alex').to(self.device)
Example #23
    def initialize_networks(self, opt):
        netG = networks.define_G(opt)
        netD = networks.define_D(opt) if opt.isTrain else None

        if not opt.isTrain or opt.continue_train:
            netG = util.load_network(netG, 'G', opt.which_epoch, opt)
            if opt.isTrain:
                netD = util.load_network(netD, 'D', opt.which_epoch, opt)

        return netG, netD
Example #24
def main(opt):
    # define the generator with spectral normalization. Only the last argument (opt) counts.
    netG = networks.define_G(opt.netG, opt=opt)
    util.load_network(netG, opt.restore_G_path, True)
    print(netG)
    netG.remove_spectral_norm()
    dirname = os.path.dirname(opt.output_path)
    os.makedirs(dirname, exist_ok=True)
    torch.save(netG.cpu().state_dict(), opt.output_path)
    print('Successfully exported the model to [%s]!' % opt.output_path)
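A hedged follow-up sketch: the exported file is a plain state dict, so it can be spot-checked with standard torch calls (illustrative, not part of the source script):

    state = torch.load(opt.output_path, map_location='cpu')
    print(sorted(state.keys())[:5])  # inspect a few exported parameter names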
Example #25
    def initialize_networks(self, opt):
        net = {}
        net['netG'] = networks.define_G(opt)  # Get SPADEGenerator
        net['netCorr'] = networks.define_Corr(opt)  # Get NoVGGCorrespondence

        if not opt.isTrain or opt.continue_train:
            net['netG'] = util.load_network(net['netG'], 'G', opt.which_epoch,
                                            opt)
            net['netCorr'] = util.load_network(net['netCorr'], 'Corr',
                                               opt.which_epoch, opt)
        return net
Example #26
    def initialize_networks(self, opt):
        netG = None
        netD = None
        netE = None
        netV = None
        netA = None
        netA_sync = None
        if opt.train_recognition:
            netV = networks.define_V(opt)
        elif opt.train_sync:
            netA_sync = networks.define_A_sync(opt) if opt.use_audio else None
            netE = networks.define_E(opt)
        else:
            netG = networks.define_G(opt)
            netA = networks.define_A(
                opt) if opt.use_audio and opt.use_audio_id else None
            netA_sync = networks.define_A_sync(opt) if opt.use_audio else None
            netE = networks.define_E(opt)
            netV = networks.define_V(opt)

            if opt.isTrain:
                netD = networks.define_D(opt)

        if not opt.isTrain or opt.continue_train:
            self.load_network(netG, 'G', opt.which_epoch)
            self.load_network(netV, 'V', opt.which_epoch)
            self.load_network(netE, 'E', opt.which_epoch)
            if opt.use_audio:
                if opt.use_audio_id:
                    self.load_network(netA, 'A', opt.which_epoch)
                self.load_network(netA_sync, 'A_sync', opt.which_epoch)

            if opt.isTrain and not opt.noload_D:
                self.load_network(netD, 'D', opt.which_epoch)
                # self.load_network(netD_rotate, 'D_rotate', opt.which_epoch, pretrained_path)

        else:
            if self.opt.pretrain:
                if opt.netE == 'fan':
                    netE.load_pretrain()
                netV.load_pretrain()
            if opt.load_separately:
                netG = self.load_separately(netG, 'G', opt)
                netA = self.load_separately(
                    netA, 'A',
                    opt) if opt.use_audio and opt.use_audio_id else None
                netA_sync = self.load_separately(
                    netA_sync, 'A_sync', opt) if opt.use_audio else None
                netV = self.load_separately(netV, 'V', opt)
                netE = self.load_separately(netE, 'E', opt)
                if not opt.noload_D:
                    netD = self.load_separately(netD, 'D', opt)
        return netG, netD, netA, netA_sync, netV, netE
Example #27
    def load_weight(self, pathlist: dict):
        self.net_Gs = []
        self.net_Ds = []
        for weight in pathlist['net_G']:
            net_G = define_G(self.opt).to(self.device)
            net_G.load_state_dict(torch.load(weight, map_location=self.device))
            self.net_Gs.append(net_G)
        for weight in pathlist['net_D']:
            net_D = define_D(self.opt).to(self.device)
            net_D.load_state_dict(torch.load(weight, map_location=self.device))
            self.net_Ds.append(net_D)
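A hedged usage sketch for load_weight; the instance name and checkpoint paths are placeholders:

    runner.load_weight({
        'net_G': ['ckpt/latest_net_G.pth'],  # hypothetical paths
        'net_D': ['ckpt/latest_net_D.pth'],
    })
    print(len(runner.net_Gs), len(runner.net_Ds))  # one network per listed path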
Example #28
    def __init__(self, opt):
        self.opt = opt
        self.device = torch.device(
            'cuda' if opt['gpu_ids'] is not None else 'cpu')
        self.is_train = opt['is_train']
        self.schedulers = []
        self.optimizers = []
        # define network and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        self.print_network()
        self.load()
Example #29
    def __init__(self, opt):
        super(SPADEModel, self).__init__(opt)
        self.model_names = ['G']
        self.visual_names = ['labels', 'fake_B', 'real_B']
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                      opt.norm, opt.dropout_rate, opt.init_type,
                                      opt.init_gain, self.gpu_ids, opt=opt)
        if opt.isTrain:
            raise NotImplementedError("Training mode of SPADE is currently not supported!!!")
        else:
            self.netG.eval()
Example #30
    def __init__(self, model_path):
        self.model = define_G()
        self.transform_list = [
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5, 0.5), (0.5, 0.5, 0.5, 0.5))
        ]
        self.transform = transforms.Compose(self.transform_list)
        self.model.load_state_dict(torch.load(model_path))
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.model.eval()
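A hedged usage sketch for the wrapper above; the class name is not shown in the excerpt, and the 4-channel Normalize implies a 4-channel (e.g. RGBA) input:

    predictor = Predictor('weights/latest_net_G.pth')  # hypothetical class/path
    x = predictor.transform(img).unsqueeze(0)          # img: 4-channel PIL image
    with torch.no_grad():
        y = predictor.model(x)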