Example #1
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.is_Train = opt.is_Train

        self.G = Generator(N_p=opt.N_p, N_z=opt.N_z)
        self.D = Discriminator(N_p=opt.N_p, N_d=opt.N_d)
        if self.is_Train:
            self.optimizer_G = optim.Adam(self.G.parameters(),
                                          lr=opt.lr_G,
                                          betas=(opt.beta1, opt.beta2))
            self.optimizer_D = optim.Adam(self.D.parameters(),
                                          lr=opt.lr_D,
                                          betas=(opt.beta1, opt.beta2))
            self.criterion = nn.CrossEntropyLoss()
            self.L1_criterion = nn.L1Loss()
            self.w_L1 = opt.w_L1

        self.N_z = opt.N_z
        self.N_p = opt.N_p
        self.N_d = opt.N_d

        import torchvision.models as models
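        # Frozen, ImageNet-pretrained ResNet-18 used as a fixed feature
        # extractor: replacing .fc with a LeakyReLU makes forward() return the
        # activated pooled 512-d features instead of class logits.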
        self.resnet18 = models.resnet18(pretrained=True)
        if self.gpu_ids:
            self.resnet18 = self.resnet18.cuda()
        self.resnet18.fc = torch.nn.LeakyReLU(0.1)
        for param in self.resnet18.parameters():
            param.requires_grad = False
        self.resnet18.eval()

        torch.set_num_threads(1)  # limit this process to a single intra-op CPU thread
Example #2
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.is_Train = opt.is_Train

        self.G = Generator(N_p=opt.N_p, N_z=opt.N_z, single=True)
        self.D = Discriminator(N_p=opt.N_p, N_d=opt.N_d)
        if self.is_Train:
            self.optimizer_G = optim.Adam(self.G.parameters(), lr=opt.lr_G, betas=(opt.beta1, opt.beta2))
            self.optimizer_D = optim.Adam(self.D.parameters(), lr=opt.lr_D, betas=(opt.beta1, opt.beta2))
            self.criterion = nn.CrossEntropyLoss()
            self.L1_criterion = nn.L1Loss()
            self.w_L1 = opt.w_L1

        self.N_z = opt.N_z
        self.N_p = opt.N_p
        self.N_d = opt.N_d
Example #3
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain

        # define tensors
        # self.Tensor is torch.cuda.Tensor if gpu_ids is defined, otherwise use torch.FloatTensor
        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize)
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
                                   opt.fineSize, opt.fineSize)
        if self.gpu_ids:
            self.input_A = self.input_A.cuda(device=self.gpu_ids[0])
            self.input_B = self.input_B.cuda(device=self.gpu_ids[0])

        # define networks
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)
        if self.isTrain:
            use_sigmoid = opt.no_lsgan  # vanilla GAN loss needs a sigmoid output; LSGAN does not
            self.netD = networks.define_D(opt.input_nc + opt.output_nc,
                                          opt.ndf, opt.which_model_netD,
                                          opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
        
        # load network if continue training / in test phase
        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.isTrain:
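            # pool of previously generated fake (A, B) pairs, replayed when
            # updating D so the discriminator sees a history of fakes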
            self.fake_AB_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr
            # define loss functions
            self.criterionGAN  = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor, gpu_ids=opt.gpu_ids)
            self.criterionL1   = torch.nn.L1Loss()
            
            if opt.use_prcp:
                self.criterionPrcp = networks.PrcpLoss(opt.weight_path, opt.bias_path, opt.perceptual_level, tensor=self.Tensor, gpu_ids=opt.gpu_ids)

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
Example #4
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        if opt.resize_or_crop != 'none' or not opt.isTrain:  # cudnn autotuning; skipped when training at full resolution, where it can cause OOM
            torch.backends.cudnn.benchmark = True
        self.isTrain = opt.isTrain
        input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc

        ##### define networks
        # Generator network
        netG_input_nc = input_nc
        # Main Generator
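        # NOTE: the first argument hard-codes the generator input to 11
        # channels, so netG_input_nc computed above is effectively unused here.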
        self.netG = networks.define_G(11,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)

        self.netP = networks.define_P(44,
                                      20,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)
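        # netP maps a 44-channel input to a 20-channel output (both
        # hard-coded) and is restored from a fixed parsing checkpoint below
        # rather than trained here.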
        self.netP.load_state_dict(
            torch.load(
                os.path.dirname(os.path.realpath(__file__)) +
                "/checkpoints/generate/parse.pth"))

        # Discriminator network
        if self.isTrain:
            use_sigmoid = opt.no_lsgan  # vanilla GAN loss needs a sigmoid output; LSGAN does not
            netD_input_nc = input_nc + opt.output_nc
            netB_input_nc = opt.output_nc * 2
            self.netD = networks.define_D(netD_input_nc,
                                          opt.ndf,
                                          opt.n_layers_D,
                                          opt.norm,
                                          use_sigmoid,
                                          opt.num_D,
                                          not opt.no_ganFeat_loss,
                                          gpu_ids=self.gpu_ids)
            #self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids)

        if self.opt.verbose:
            print('---------- Networks initialized -------------')

        # load networks
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            pretrained_path = '' if not self.isTrain else opt.load_pretrain
            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)

            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch,
                                  pretrained_path)

        # set loss functions and optimizers
        if self.isTrain:
            if opt.pool_size > 0 and len(self.gpu_ids) > 1:
                raise NotImplementedError(
                    "Fake Pool Not Implemented for MultiGPU")
            self.fake_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr

            # define loss functions
            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss,
                                                     not opt.no_vgg_loss)

            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionFeat = torch.nn.L1Loss()
            if not opt.no_vgg_loss:
                self.criterionVGG = networks.VGGLoss(self.gpu_ids)
            self.criterionStyle = networks.StyleLoss(self.gpu_ids)
            # Names so we can breakout loss
            self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG',
                                               'D_real', 'D_fake')
            # initialize optimizers
            # optimizer G
            if opt.niter_fix_global > 0:
                finetune_list = set()

                params_dict = dict(self.netG.named_parameters())
                params = []
                for key, value in params_dict.items():
                    if key.startswith('model' + str(opt.n_local_enhancers)):
                        params += [value]
                        finetune_list.add(key.split('.')[0])
                print(
                    '------------- Only training the local enhancer network (for %d epochs) ------------'
                    % opt.niter_fix_global)
                print('The layers that are finetuned are ',
                      sorted(finetune_list))
            else:
                # netimage / netcolor / netlabel / netsketch / classfier are
                # assumed to be defined elsewhere on this model before
                # initialize() runs; otherwise this line raises AttributeError
                params = (list(self.netG.parameters()) +
                          list(self.netimage.parameters()) +
                          list(self.netcolor.parameters()) +
                          list(self.netlabel.parameters()) +
                          list(self.netsketch.parameters()) +
                          list(self.classfier.parameters()))

            self.optimizer_G = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))

            # optimizer D
            params = list(self.netD.parameters())
            self.optimizer_D = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
Example #5
    def initialize(self, opt):
        BaseModel.initialize(self, opt)

        self.model = opt.model
        if 'EFG' in self.model:
            self.which_model = 'EFG'
        elif 'NFG' in self.model:
            self.which_model = 'NFG'
        else:
            raise ValueError("model %s is not supported." % opt.model)
        self.input_nc = opt.input_nc
        self.output_nc = opt.output_nc
        self.nfg = opt.nfg
        self.batch_size = opt.batch_size
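        # num_downs follows the usual U-Net convention (number of
        # down-sampling stages) and is derived from the feature width nfg;
        # only nfg = 128 or 64 is supported.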
        if self.nfg == 128:
            self.num_downs = 7
        elif self.nfg == 64:
            self.num_downs = 6
        else:
            raise ValueError("Only support nfg = 128 or 64. Got %d" % self.nfg)
        # configuration of NFG network
        # attributes used to build the networks below must exist in both the
        # train and test phases, so they are not gated on opt.isTrain
        self.dropout = getattr(opt, 'dropout', False)
        self.use_sigmoid = False
        self.norm = functools.partial(nn.BatchNorm2d, affine=True)
        if opt.isTrain:
            self.lr_adam = opt.learning_rate_adam
            self.lr_rmsprop = opt.learning_rate_rmsprop
            self.lam_cyc = opt.lam_cyc
            self.lam_l1 = opt.lam_l1
            self.lam_idt = opt.lam_idt
            self.beta1 = opt.beta1
            self.beta2 = opt.beta2
            self.criterionGAN = nn.MSELoss()
            self.criterionL1 = nn.L1Loss()

        # setup input
        self.input_A = self.Tensor(opt.batch_size, opt.input_nc, opt.img_size,
                                   opt.img_size)
        self.input_B = self.Tensor(opt.batch_size, opt.input_nc, opt.img_size,
                                   opt.img_size)
        if 'EFG' in self.model:
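            # EFG variant: the expression code is stored as a 3x8x8 tensor, a
            # shape that suggests it is tiled spatially for concatenation with
            # feature maps.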
            self.expression_label = self.Tensor(opt.batch_size, 3, 8, 8)
        # build up network
        self.net_G_AtoB = Unet_G2(self.input_nc,
                                  self.output_nc,
                                  self.which_model,
                                  self.nfg,
                                  norm_layer=self.norm,
                                  use_dropout=self.dropout)
        self.net_G_BtoA = Unet_G2(self.input_nc,
                                  self.output_nc,
                                  self.which_model,
                                  self.nfg,
                                  norm_layer=self.norm,
                                  use_dropout=self.dropout)

        self.net_D_A = NLayerDiscriminator(self.input_nc,
                                           norm_layer=self.norm,
                                           use_sigmoid=self.use_sigmoid)
        self.net_D_B = NLayerDiscriminator(self.input_nc,
                                           norm_layer=self.norm,
                                           use_sigmoid=self.use_sigmoid)

        if torch.cuda.device_count() > 1:
            self.net_G_AtoB = nn.DataParallel(self.net_G_AtoB)
            self.net_G_BtoA = nn.DataParallel(self.net_G_BtoA)
            self.net_D_A = nn.DataParallel(self.net_D_A)
            self.net_D_B = nn.DataParallel(self.net_D_B)
        if torch.cuda.is_available():
            print("Using %d GPUS." % torch.cuda.device_count())
            self.net_G_AtoB.cuda()
            self.net_G_BtoA.cuda()
            self.net_D_A.cuda()
            self.net_D_B.cuda()
        # set up optimizers (training phase only: the learning rates and betas
        # above are defined only when opt.isTrain)
        if opt.isTrain:
            if 'LSGAN' in self.model:
                self.optimizer_G = torch.optim.Adam(
                    list(self.net_G_AtoB.parameters()) +
                    list(self.net_G_BtoA.parameters()),
                    lr=self.lr_adam,
                    betas=(self.beta1, self.beta2))
                self.optimizer_D_A = torch.optim.Adam(self.net_D_A.parameters(),
                                                      lr=self.lr_adam,
                                                      betas=(self.beta1, self.beta2))
                self.optimizer_D_B = torch.optim.Adam(self.net_D_B.parameters(),
                                                      lr=self.lr_adam,
                                                      betas=(self.beta1, self.beta2))
            elif 'WGAN' in self.model:
                self.optimizer_G = torch.optim.RMSprop(
                    list(self.net_G_AtoB.parameters()) +
                    list(self.net_G_BtoA.parameters()),
                    lr=self.lr_rmsprop)
                self.optimizer_D_A = torch.optim.RMSprop(self.net_D_A.parameters(),
                                                         lr=self.lr_rmsprop)
                self.optimizer_D_B = torch.optim.RMSprop(self.net_D_B.parameters(),
                                                         lr=self.lr_rmsprop)
            else:
                raise ValueError('%s is not supported.' % self.model)
        # save generated images
        self.out_dir = opt.out_dir + self.model + '/images/'
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)

        self.out_loss = opt.out_dir + self.model + '/losses/'
        if not os.path.exists(self.out_loss):
            os.makedirs(self.out_loss)

        # initialize loss lists
        self.loss_G_AGANs = []
        self.loss_G_BGANs = []
        self.loss_cyc_As = []
        self.loss_cyc_Bs = []
        self.loss_D_As = []
        self.loss_D_Bs = []

        print(
            "initializing completed:\n model name: %s\n input_nc: %s\n use_sigmoid: %s\n"
            % (self.model, self.input_nc, self.use_sigmoid))
Example #6
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        # define tensors
        self.sparse_input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                          opt.sparseSize, opt.sparseSize)
        self.mask_input_A = self.Tensor(opt.batchSize, 1,
                                        opt.fineSize, opt.fineSize)

        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize)
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
                                   opt.fineSize, opt.fineSize)
        self.label = self.Tensor(opt.batchSize, 1)
        if opt.nz > 0:
            self.noise = self.Tensor(opt.batchSize, opt.nz)
            self.test_noise = self.get_z_random(opt.num_interpolate, opt.nz)
            self.test_noise.normal_(0, 0.2)
        # load/define networks
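        # the generator architecture is forced to the GAN-stability variant,
        # overriding whatever which_model_netG was passed on the command line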
        opt.which_model_netG = 'GAN_stability_Generator'
        self.netG = networks_sparse.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                             opt.which_model_netG, opt.norm,
                                             not opt.no_dropout, opt.init_type,
                                             self.gpu_ids, opt)

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            opt.which_model_netD = 'GAN_stability_Discriminator'
            self.netD = networks_sparse.define_D(opt.input_nc + opt.output_nc, opt.ndf,
                                                 opt.which_model_netD,
                                                 opt.n_layers_D, opt.norm, use_sigmoid,
                                                 opt.init_type, self.gpu_ids, opt)
            # wrap both networks for (multi-)GPU training
            self.netD = nn.DataParallel(self.netD)
            self.netD.to(device)
            self.netG = nn.DataParallel(self.netG)
            self.netG.to(device)
        else:
            self.netG.cuda()

        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr
            # define loss functions
            self.criterionGAN = WGANLoss(tensor=self.Tensor)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.schedulers = []
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr_g, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_d, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            for optimizer in self.optimizers:
                self.schedulers.append(get_scheduler(optimizer, opt))

        print('---------- Networks initialized -------------')
        print_network(self.netG)
        if self.isTrain:
            print_network(self.netD)
        print('-----------------------------------------------')
Example #7
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain

        self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
                                   opt.fineSize, opt.fineSize)
        self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
                                   opt.fineSize, opt.fineSize)
        if self.gpu_ids:
            self.input_A = self.input_A.cuda(device=self.gpu_ids[0])
            self.input_B = self.input_B.cuda(device=self.gpu_ids[0])

        # load/define networks
        # The naming convention is different from the one used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)

        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.which_model_netG, opt.norm, opt.use_dropout, self.gpu_ids)

        # If this is training phase
        if self.isTrain:
            use_sigmoid = opt.no_lsgan  # vanilla GAN loss needs a sigmoid output; LSGAN does not
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)

        # If this is non-training phase/continue training phase
        if not self.isTrain or opt.continue_train:
            which_epoch = opt.which_epoch
            self.load_network(self.netG_A, 'G_A', which_epoch)
            self.load_network(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_network(self.netD_A, 'D_A', which_epoch)
                self.load_network(self.netD_B, 'D_B', which_epoch)

        if self.isTrain:
            # build up so called history pool
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr

            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor, gpu_ids=opt.gpu_ids)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            if opt.use_prcp:
                self.criterionPrcp = networks.PrcpLoss(opt.weight_path, opt.bias_path, opt.perceptual_level, tensor=self.Tensor, gpu_ids=opt.gpu_ids)

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG_A)
            networks.print_network(self.netG_B)
            networks.print_network(self.netD_A)
            networks.print_network(self.netD_B)
            print('-----------------------------------------------')
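
Usage note: every snippet above is an initialize(self, opt) method from a
pix2pix/CycleGAN-style model class. The sketch below shows how such a method
is typically driven; TrainOptions and Pix2PixModel are assumed names for
illustration, not taken from the snippets themselves.

    # hypothetical driver, assuming a TrainOptions parser and a model class
    # with one of the initialize() methods shown above
    opt = TrainOptions().parse()  # supplies lr, beta1, gpu_ids, isTrain, ...
    model = Pix2PixModel()
    model.initialize(opt)         # builds networks, losses, and optimizers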