Example #1
    def initialize(self, opt):
        super(PoseParsingModel, self).initialize(opt)
        ###################################
        # create model
        ###################################
        if opt.which_model_PP == 'resnet':
            self.netPP = networks.ResnetGenerator(
                input_nc = self.get_data_dim(opt.pp_input_type),
                output_nc = self.get_data_dim(opt.pp_pose_type),
                ngf = opt.pp_nf,
                norm_layer = networks.get_norm_layer(opt.norm),
                activation = nn.ReLU,
                use_dropout = False,
                n_blocks = opt.pp_nblocks,
                gpu_ids = opt.gpu_ids,
                output_tanh = False,
                )
        elif opt.which_model_PP == 'unet':
            self.netPP = networks.UnetGenerator_v2(
                input_nc = self.get_data_dim(opt.pp_input_type),
                output_nc = self.get_data_dim(opt.pp_pose_type),
                num_downs = 8,
                ngf = opt.pp_nf,
                max_nf = opt.pp_nf*(2**3),
                norm_layer = networks.get_norm_layer(opt.norm),
                use_dropout = False,
                gpu_ids = opt.gpu_ids,
                output_tanh = False,
                )
        else:
            raise NotImplementedError()

        if opt.gpu_ids:
            self.netPP.cuda()
        ###################################
        # init/load model
        ###################################
        if self.is_train and (not opt.continue_train):
            networks.init_weights(self.netPP, init_type=opt.init_type)
        else:
            self.load_network(self.netPP, 'netPP', opt.which_epoch)
        ###################################
        # optimizers and schedulers
        ###################################
        if self.is_train:
            self.optim = torch.optim.Adam(self.netPP.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
            self.optimizers = [self.optim]

            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
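Every example on this page builds its generators around norm_layer = networks.get_norm_layer(opt.norm). That helper is not shown here; the following is a minimal sketch, assuming the common pix2pix-style convention of mapping an option string to a layer constructor (the exact strings this project accepts are an assumption):

import functools
import torch.nn as nn

def get_norm_layer(norm_type='instance'):
    # Map an option string to a normalization-layer constructor.
    # Returning functools.partial lets callers instantiate the layer
    # with just a channel count, e.g. norm_layer(opt.ngf).
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)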
Example #2
 def initialize(self, opt):
     super(DomainTransferModel, self).initialize(opt)
     ###################################
     # define Enc_A and Dec_B (VUnet)
     ###################################
     self.netA = networks.VariationalUnet(
         input_nc_dec=self.get_pose_dim(opt.pose_type),
         input_nc_enc=3,
         output_nc=3,
         nf=opt.vunet_nf,
         max_nf=opt.vunet_max_nf,
         input_size=opt.fine_size,
         n_latent_scales=opt.vunet_n_latent_scales,
         bottleneck_factor=opt.vunet_bottleneck_factor,
         box_factor=opt.vunet_box_factor,
         n_residual_blocks=2,
         norm_layer=networks.get_norm_layer(opt.norm),
         activation=nn.ReLU(False),
         use_dropout=False,
         gpu_ids=opt.gpu_ids,
     )
     if opt.gpu_ids:
         self.netA.cuda()
     networks.init_weights(self.netA, init_type=opt.init_type)
     ###################################
     # define Enc_B and Dec_B (VAE)
     ###################################
     self.netB = networks.VariationalAutoEncoder(
         input_nc=3,
         output_nc=3,
         nf=opt.vae_nf,
         max_nf=opt.vae_max_nf,
         latent_nf=opt.vae_latent_nf,
         input_size=opt.fine_size,
         bottleneck_factor=opt.vae_bottleneck_factor,
         n_residual_blocks=2,
         norm_layer=networks.get_norm_layer(opt.norm),
         activation=nn.ReLU(False),
         use_dropout=False,
         gpu_ids=opt.gpu_ids,
     )
     if opt.gpu_ids:
         self.netB.cuda()
     networks.init_weights(self.netB, init_type=opt.init_type)
     ###################################
     # define feature transfer network
     ###################################
     self.netFT = networks.VUnetLatentTransformer()
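Both sub-networks above are initialized with networks.init_weights(net, init_type=opt.init_type). A sketch of what such a helper typically looks like, assuming the pix2pix-style dispatch on init_type:

from torch.nn import init

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('init method [%s] not implemented' % init_type)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    # Recursively apply init_func to every submodule.
    net.apply(init_func)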
Example #3
 def __init__(self, gpu_ids=[0], load_model=None):
     self.gpu = gpu_ids
     norm_layer = networks.get_norm_layer('instance')
     self.net = networks.UnetGenerator(3, 1, 8, 64, use_dropout=False, norm_layer=norm_layer, gpu_ids=gpu_ids)
     if load_model is not None:
         self.net.load_state_dict(torch.load(load_model))
     self.net.cuda(gpu_ids[0])
Example #4
 def _create_stage_1_net(self, opt):
     '''
     The stage-1 network should be a pretrained pose transfer model.
     Assume it is a VUnet for now.
     '''
     # load options
     opt_s1 = argparse.Namespace()
     dict_opt_s1 = io.load_json(
         os.path.join('checkpoints', opt.which_model_stage_1,
                      'train_opt.json'))
     opt_s1.__dict__.update(dict_opt_s1)
     self.opt_s1 = opt_s1
     # create model
     if opt_s1.which_model_T == 'vunet':
         self.netT_s1 = networks.VariationalUnet(
             input_nc_dec=self.get_pose_dim(opt_s1.pose_type),
             input_nc_enc=self.get_appearance_dim(opt_s1.appearance_type),
             output_nc=self.get_output_dim(opt_s1.output_type),
             nf=opt_s1.vunet_nf,
             max_nf=opt_s1.vunet_max_nf,
             input_size=opt_s1.fine_size,
             n_latent_scales=opt_s1.vunet_n_latent_scales,
             bottleneck_factor=opt_s1.vunet_bottleneck_factor,
             box_factor=opt_s1.vunet_box_factor,
             n_residual_blocks=2,
             norm_layer=networks.get_norm_layer(opt_s1.norm),
             activation=nn.ReLU(False),
             use_dropout=False,
             gpu_ids=opt.gpu_ids,
             output_tanh=False,
         )
         if opt.gpu_ids:
             self.netT_s1.cuda()
     else:
         raise NotImplementedError()
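Note how the stage-1 options are rebuilt from the train_opt.json dumped at training time rather than re-parsed from the command line. io.load_json is a project helper not shown on this page; presumably it is just a thin wrapper like this sketch:

import json

def load_json(fname):
    # Read a JSON options dump back into a plain dict,
    # which the caller merges into an argparse.Namespace.
    with open(fname, 'r') as f:
        return json.load(f)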
Example #5
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define norm layer
        norm_layer = networks.get_norm_layer(opt.norm)
        # define networks (both generator and discriminator)
        self.netG = self.define_net(
            UnetGenerator(opt.input_nc, opt.output_nc, 8, opt.ngf, norm_layer,
                          not opt.no_dropout))

        if self.isTrain:  # define a discriminator; conditional GANs take both input and output images, so the channel count for D is input_nc + output_nc
            self.netD = self.define_net(
                NLayerDiscriminator(opt.input_nc + opt.output_nc,
                                    opt.ndf,
                                    norm_layer=norm_layer))

        if self.isTrain:
            # define loss functions
            self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
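The schedulers referenced here (and created explicitly in the other examples via networks.get_scheduler(optim, opt)) usually wrap each optimizer in a torch.optim.lr_scheduler object chosen by an option flag. A sketch, assuming pix2pix-style options; the names lr_policy, niter, niter_decay and lr_decay_iters are assumptions, not shown in these examples:

from torch.optim import lr_scheduler

def get_scheduler(optimizer, opt):
    if opt.lr_policy == 'lambda':
        # Hold the base lr for opt.niter epochs, then decay linearly
        # to zero over the next opt.niter_decay epochs.
        def lambda_rule(epoch):
            return 1.0 - max(0, epoch + 1 - opt.niter) / float(opt.niter_decay + 1)
        return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        return lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=5)
    raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)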
Example #6
    def initialize(self, opt):
        super(SupervisedPoseTransferModel, self).initialize(opt)
        ###################################
        # define transformer
        ###################################
        if opt.which_model_T == 'resnet':
            self.netT = networks.ResnetGenerator(
                input_nc=3 + self.get_pose_dim(opt.pose_type),
                output_nc=3,
                ngf=opt.T_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=not opt.no_dropout,
                n_blocks=9,
                gpu_ids=opt.gpu_ids)
        elif opt.which_model_T == 'unet':
            self.netT = networks.UnetGenerator_v2(
                input_nc=3 + self.get_pose_dim(opt.pose_type),
                output_nc=3,
                num_downs=8,
                ngf=opt.T_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=not opt.no_dropout,
                gpu_ids=opt.gpu_ids)
        else:
            raise NotImplementedError()

        if opt.gpu_ids:
            self.netT.cuda()
        networks.init_weights(self.netT, init_type=opt.init_type)
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=3,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        if self.is_train:
            self.loss_functions = []
            self.schedulers = []
            self.optimizers = []

            self.crit_L1 = nn.L1Loss()
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids)
            # self.crit_vgg_old = networks.VGGLoss(self.gpu_ids)
            self.crit_psnr = networks.PSNR()
            self.crit_ssim = networks.SSIM()
            self.loss_functions += [self.crit_L1, self.crit_vgg]
            self.optim = torch.optim.Adam(self.netT.parameters(),
                                          lr=opt.lr,
                                          betas=(opt.beta1, opt.beta2))
            self.optimizers += [self.optim]

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_D,
                                                betas=(opt.beta1, opt.beta2))
                self.loss_functions.append(self.crit_GAN)
                self.optimizers.append(self.optim_D)
            # todo: add pose loss
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

            self.fake_pool = ImagePool(opt.pool_size)

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            self.load_network(self.netT, 'netT', opt.which_epoch)
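networks.GANLoss(use_lsgan=..., tensor=...) wraps the adversarial objective so the training loop can score discriminator outputs against a boolean "is real" target. A condensed sketch, assuming the classic formulation (the tensor argument is kept only for signature compatibility):

import torch
import torch.nn as nn

class GANLoss(nn.Module):
    def __init__(self, use_lsgan=True, tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        # LSGAN scores with MSE against soft labels; vanilla GAN uses BCE.
        self.loss = nn.MSELoss() if use_lsgan else nn.BCEWithLogitsLoss()

    def forward(self, input, target_is_real):
        target = torch.full_like(input, 1.0 if target_is_real else 0.0)
        return self.loss(input, target)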
Example #7
    def initialize(self, opt):
        super(VUnetPoseTransferModel, self).initialize(opt)
        ###################################
        # define transformer
        ###################################
        self.netT = networks.VariationalUnet(
            input_nc_dec = self.get_pose_dim(opt.pose_type),
            input_nc_enc = self.get_appearance_dim(opt.appearance_type),
            output_nc = self.get_output_dim(opt.output_type),
            nf = opt.vunet_nf,
            max_nf = opt.vunet_max_nf,
            input_size = opt.fine_size,
            n_latent_scales = opt.vunet_n_latent_scales,
            bottleneck_factor = opt.vunet_bottleneck_factor,
            box_factor = opt.vunet_box_factor,
            n_residual_blocks = 2,
            norm_layer = networks.get_norm_layer(opt.norm),
            activation = nn.ReLU(False),
            use_dropout = False,
            gpu_ids = opt.gpu_ids,
            output_tanh = False,
            )
        if opt.gpu_ids:
            self.netT.cuda()
        networks.init_weights(self.netT, init_type=opt.init_type)
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=opt.D_n_layer,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        self.crit_psnr = networks.PSNR()
        self.crit_ssim = networks.SSIM()

        if self.is_train:
            self.optimizers = []
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids, opt.content_layer_weight, opt.style_layer_weight, opt.shifted_style)
            # self.crit_vgg_old = networks.VGGLoss(self.gpu_ids)
            self.optim = torch.optim.Adam(self.netT.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), weight_decay=opt.weight_decay)
            self.optimizers += [self.optim]

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(use_lsgan=(opt.which_gan == 'lsgan'), tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr_D, betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_D)
            # todo: add pose loss
            self.fake_pool = ImagePool(opt.pool_size)

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            self.load_network(self.netT, 'netT', opt.which_epoch)
        elif opt.continue_train:
            self.load_network(self.netT, 'netT', opt.which_epoch)
            self.load_optim(self.optim, 'optim', opt.which_epoch)
            if self.use_GAN:
                self.load_network(self.netD, 'netD', opt.which_epoch)
                self.load_optim(self.optim_D, 'optim_D', opt.which_epoch)
        ###################################
        # schedulers
        ###################################
        if self.is_train:
            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
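The ImagePool(opt.pool_size) used with the GAN branch keeps a buffer of previously generated images, so the discriminator is updated on a mix of current and historical fakes rather than only the newest batch. A sketch of the standard CycleGAN-style pool:

import random
import torch

class ImagePool:
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, images):
        if self.pool_size == 0:
            return images
        out = []
        for image in images:
            image = image.unsqueeze(0)
            if len(self.images) < self.pool_size:
                # Buffer not full yet: remember and return the fresh fake.
                self.images.append(image)
                out.append(image)
            elif random.random() > 0.5:
                # Swap the fresh fake for a random historical one.
                idx = random.randint(0, self.pool_size - 1)
                out.append(self.images[idx].clone())
                self.images[idx] = image
            else:
                out.append(image)
        return torch.cat(out, 0)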
Example #8
    def initialize(self, opt):
        super(TwoStagePoseTransferModel, self).initialize(opt)
        ###################################
        # load pretrained stage-1 (coarse) network
        ###################################
        self._create_stage_1_net(opt)
        ###################################
        # define stage-2 (refine) network
        ###################################
        # local patch encoder
        if opt.which_model_s2e == 'patch_embed':
            self.netT_s2e = networks.LocalPatchEncoder(
                n_patch=len(opt.patch_indices),
                input_nc=3,
                output_nc=opt.s2e_nof,
                nf=opt.s2e_nf,
                max_nf=opt.s2e_max_nf,
                input_size=opt.patch_size,
                bottleneck_factor=opt.s2e_bottleneck_factor,
                n_residual_blocks=2,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU(False),
                use_dropout=False,
                gpu_ids=opt.gpu_ids,
            )
            s2e_nof = opt.s2e_nof
        elif opt.which_model_s2e == 'patch':
            self.netT_s2e = networks.LocalPatchRearranger(
                n_patch=len(opt.patch_indices),
                image_size=opt.fine_size,
            )
            s2e_nof = 3
        elif opt.which_model_s2e == 'seg_embed':
            self.netT_s2e = networks.SegmentRegionEncoder(
                seg_nc=self.opt.seg_nc,
                input_nc=3,
                output_nc=opt.s2e_nof,
                nf=opt.s2e_nf,
                input_size=opt.fine_size,
                n_blocks=3,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                grid_level=opt.s2e_grid_level,
                gpu_ids=opt.gpu_ids,
            )
            s2e_nof = opt.s2e_nof + opt.s2e_grid_level
        else:
            raise NotImplementedError()
        if opt.gpu_ids:
            self.netT_s2e.cuda()

        # decoder
        if self.opt.which_model_s2d == 'resnet':
            self.netT_s2d = networks.ResnetGenerator(
                input_nc=3 + s2e_nof,
                output_nc=3,
                ngf=opt.s2d_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                n_blocks=opt.s2d_nblocks,
                gpu_ids=opt.gpu_ids,
                output_tanh=False,
            )
        elif self.opt.which_model_s2d == 'unet':
            self.netT_s2d = networks.UnetGenerator_v2(
                input_nc=3 + s2e_nof,
                output_nc=3,
                num_downs=8,
                ngf=opt.s2d_nf,
                max_nf=opt.s2d_nf * 2**3,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=False,
                gpu_ids=opt.gpu_ids,
                output_tanh=False,
            )
        elif self.opt.which_model_s2d == 'rpresnet':
            self.netT_s2d = networks.RegionPropagationResnetGenerator(
                input_nc=3 + s2e_nof,
                output_nc=3,
                ngf=opt.s2d_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                nblocks=opt.s2d_nblocks,
                gpu_ids=opt.gpu_ids,
                output_tanh=False)
        else:
            raise NotImplementedError()
        if opt.gpu_ids:
            self.netT_s2d.cuda()
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=opt.D_n_layer,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        self.crit_psnr = networks.PSNR()
        self.crit_ssim = networks.SSIM()

        if self.is_train:
            self.optimizers = []
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids,
                                                opt.content_layer_weight,
                                                opt.style_layer_weight,
                                                opt.shifted_style)

            self.optim = torch.optim.Adam(
                [{'params': self.netT_s2e.parameters()},
                 {'params': self.netT_s2d.parameters()}],
                lr=opt.lr,
                betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim)

            if opt.train_s1:
                self.optim_s1 = torch.optim.Adam(self.netT_s1.parameters(),
                                                 lr=opt.lr_s1,
                                                 betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_s1)

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_D,
                                                betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_D)
                self.fake_pool = ImagePool(opt.pool_size)
        ###################################
        # init/load model
        ###################################
        if self.is_train:
            if not opt.continue_train:
                self.load_network(self.netT_s1, 'netT', 'latest',
                                  self.opt_s1.id)
                networks.init_weights(self.netT_s2e, init_type=opt.init_type)
                networks.init_weights(self.netT_s2d, init_type=opt.init_type)
                if self.use_GAN:
                    networks.init_weights(self.netD, init_type=opt.init_type)
            else:
                self.load_network(self.netT_s1, 'netT_s1', opt.which_epoch)
                self.load_network(self.netT_s2e, 'netT_s2e', opt.which_epoch)
                self.load_network(self.netT_s2d, 'netT_s2d', opt.which_epoch)
                self.load_optim(self.optim, 'optim', opt.which_epoch)
                if self.use_GAN:
                    self.load_network(self.netD, 'netD', opt.which_epoch)
                    self.load_optim(self.optim_D, 'optim_D', opt.which_epoch)
        else:
            self.load_network(self.netT_s1, 'netT_s1', opt.which_epoch)
            self.load_network(self.netT_s2e, 'netT_s2e', opt.which_epoch)
            self.load_network(self.netT_s2d, 'netT_s2d', opt.which_epoch)
        ###################################
        # schedulers
        ###################################
        if self.is_train:
            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
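All of the init/load branches above funnel through load_network(net, label, epoch) and load_optim(optim, label, epoch). The loading side is typically only a few lines; a sketch, assuming the usual '<epoch>_net_<label>.pth' naming under the model's save_dir (the filename scheme is an assumption, not shown in these examples):

import os
import torch

def load_network(self, network, network_label, epoch_label):
    save_path = os.path.join(self.save_dir, '%s_net_%s.pth' % (epoch_label, network_label))
    # map_location keeps the load device-agnostic; .cuda() is applied by the caller.
    network.load_state_dict(torch.load(save_path, map_location='cpu'))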