Example #1
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['mpjpe']
        self.model_names = ['G']
        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor

        self.net_G = network.define_g(opt, filename='generator', structure_nc=opt.structure_nc, channels=256, layers=4)
        if len(opt.gpu_ids) > 1:
            self.net_G = torch.nn.DataParallel(self.net_G, device_ids=self.gpu_ids)

        self.convert2skeleton = openpose_utils.tensor2skeleton()

        if self.isTrain:
            self.L2loss = torch.nn.MSELoss()
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                                               filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                               lr=opt.lr, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)
        else:
            self.L2loss = torch.nn.MSELoss()
            util.mkdir(os.path.join(self.opt.results_dir))

        self.setup(opt)
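Example #1, like most of the examples below, begins by aliasing the tensor constructors to their CUDA variants when at least one GPU id is configured, so later code can create tensors on the right device without branching. A minimal standalone sketch of that pattern, with gpu_ids derived from torch.cuda.is_available() instead of the project's opt:

import torch

gpu_ids = [0] if torch.cuda.is_available() else []

# Device-dependent aliases, as in the example's __init__.
FloatTensor = torch.cuda.FloatTensor if len(gpu_ids) > 0 else torch.FloatTensor
ByteTensor = torch.cuda.ByteTensor if len(gpu_ids) > 0 else torch.ByteTensor

# Tensors built through the aliases land on CPU or GPU automatically.
ones = FloatTensor(2, 3).fill_(1.0)
mask = ByteTensor(2, 3).fill_(0)

On recent PyTorch versions the same effect is usually achieved by passing a device argument to torch.zeros or torch.ones, but the alias form matches what these examples do.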
Example #2
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['correctness','regularization']
        self.visual_names = ['input_P1','input_P2', 'warp', 'flow_fields',
                            'masks']
        self.model_names = ['G']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor
        self.net_G = network.define_g(opt, structure_nc=opt.structure_nc, ngf=32, img_f=256, 
                                       encoder_layer=5, norm='instance', activation='LeakyReLU', 
                                       attn_layer=self.opt.attn_layer, use_spect=opt.use_spect_g,
                                       )
        self.L1loss = torch.nn.L1Loss()
        self.flow2color = util.flow2color()

        if self.isTrain:
            self.Correctness = external_function.PerceptualCorrectness().to(opt.device)
            self.Regularization = external_function.MultiAffineRegularizationLoss(kz_dic=opt.kernel_size).to(opt.device)
            self.optimizer_G = torch.optim.Adam(itertools.chain(filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                                lr=opt.lr, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)
        self.setup(opt)
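Examples #1 and #2 both build their Adam optimizer over filter(lambda p: p.requires_grad, ...), so any parameters frozen beforehand never receive updates. A minimal standalone sketch of that pattern, with a placeholder two-layer module in place of net_G (illustrative only, not the project's generator):

import torch

# Placeholder standing in for net_G.
net = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU(), torch.nn.Linear(16, 8))

# Freeze the first layer; the filter below then skips its parameters.
for p in net[0].parameters():
    p.requires_grad = False

# Only trainable parameters reach the optimizer, exactly as in the examples.
optimizer_G = torch.optim.Adam(
    filter(lambda p: p.requires_grad, net.parameters()),
    lr=2e-4, betas=(0.0, 0.999))

print(len(optimizer_G.param_groups[0]['params']))  # 2: weight and bias of the last Linear

The itertools.chain(...) wrapper used in the examples is a no-op around a single iterable; it only matters when parameters from several networks are combined, as the discriminator optimizers further below do.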
Example #3
    def __init__(self, opt):
        """Initialize the pluralistic model"""
        BaseModel.__init__(self, opt)
        self.loss_names = ['app_gen','correctness_p', 'correctness_r','content_gen','style_gen',
                            'regularization_p', 'regularization_r',
                            'ad_gen','dis_img_gen',
                            'ad_gen_v', 'dis_img_gen_v']

        self.visual_names = ['P_reference','BP_reference', 'P_frame_step','BP_frame_step','img_gen', 'flow_fields', 'masks']        
        self.model_names = ['G','D','D_V']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor

        # define the Animation model
        self.net_G = network.define_g(opt, image_nc=opt.image_nc, structure_nc=opt.structure_nc, ngf=64, img_f=512,
                                      layers=opt.layers, num_blocks=2, use_spect=opt.use_spect_g, attn_layer=opt.attn_layer, 
                                      norm='instance', activation='LeakyReLU', extractor_kz=opt.kernel_size)
        if len(opt.gpu_ids) > 1:
            self.net_G = torch.nn.DataParallel(self.net_G, device_ids=self.gpu_ids)

        self.flow2color = util.flow2color()

        self.net_D = network.define_d(opt, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
        if len(opt.gpu_ids) > 1:
            self.net_D = torch.nn.DataParallel(self.net_D, device_ids=self.gpu_ids)

        input_nc = (opt.frames_D_V-1) * opt.image_nc
        self.net_D_V = network.define_d(opt, input_nc=input_nc, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
        if len(opt.gpu_ids) > 1:
            self.net_D_V = torch.nn.DataParallel(self.net_D_V, device_ids=self.gpu_ids)                

        if self.isTrain:
            # define the loss functions
            self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(opt.device)
            self.L1loss = torch.nn.L1Loss()
            self.L2loss = torch.nn.MSELoss()
            self.Correctness = external_function.PerceptualCorrectness().to(opt.device)
            self.Regularization = external_function.MultiAffineRegularizationLoss(kz_dic=opt.kernel_size).to(opt.device)
            self.Vggloss = external_function.VGGLoss().to(opt.device)

            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                                               filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                               lr=opt.lr, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)

            self.optimizer_D = torch.optim.Adam(itertools.chain(
                                filter(lambda p: p.requires_grad, self.net_D.parameters()),
                                filter(lambda p: p.requires_grad, self.net_D_V.parameters())),
                                lr=opt.lr*opt.ratio_g2d, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_D)
        else:
            self.results_dir_base = self.opt.results_dir
        self.setup(opt)
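Example #3 drives its image discriminator (net_D) and its video discriminator (net_D_V) with a single optimizer by chaining their parameter iterators, and it scales the discriminator learning rate by opt.ratio_g2d relative to the generator's. A minimal sketch of that arrangement with placeholder linear layers and illustrative values for lr and ratio_g2d:

import itertools
import torch

disc_img = torch.nn.Linear(8, 1)    # stands in for net_D
disc_video = torch.nn.Linear(8, 1)  # stands in for net_D_V

lr, ratio_g2d = 2e-4, 0.1  # illustrative; the project reads both from opt

# One optimizer updates both discriminators; chain() concatenates their parameter iterators.
optimizer_D = torch.optim.Adam(
    itertools.chain(disc_img.parameters(), disc_video.parameters()),
    lr=lr * ratio_g2d, betas=(0.0, 0.999))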
Example #4
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.opt = opt
        self.keys = ['head', 'body', 'leg']
        self.mask_id = {
            'head': [1, 2, 4, 13],
            'body': [3, 5, 6, 7, 10, 11, 14, 15],
            'leg': [8, 9, 12, 16, 17, 18, 19]
        }
        self.GPU = torch.device('cuda:0')

        self.loss_names = ['correctness', 'regularization']
        self.visual_names = [
            'input_P1', 'input_P2', 'warp', 'flow_fields', 'masks',
            'input_BP1', 'input_BP2'
        ]
        self.model_names = ['G']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor

        self.net_G = network.define_g(
            opt,
            structure_nc=opt.structure_nc,
            ngf=32,
            img_f=256,
            layers=5,
            norm='instance',
            activation='LeakyReLU',
            attn_layer=self.opt.attn_layer,
            use_spect=opt.use_spect_g,
        )
        self.flow2color = util.flow2color()

        if self.isTrain:
            # define the loss functions
            self.Correctness = external_function.PerceptualCorrectness().to(
                opt.device)
            self.Regularization = external_function.MultiAffineRegularizationLoss(
                kz_dic=opt.kernel_size).to(opt.device)
            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                                lr=opt.lr,
                                                betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)
        # load the pretrained model and schedulers
        self.setup(opt)
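Example #4 groups the labels of a 20-class human-parsing map into three coarse regions ('head', 'body', 'leg') via mask_id. A minimal sketch of how such a mapping can be turned into one binary mask per region; the parsing tensor below is random stand-in data, not a real parsing result:

import torch

mask_id = {
    'head': [1, 2, 4, 13],
    'body': [3, 5, 6, 7, 10, 11, 14, 15],
    'leg': [8, 9, 12, 16, 17, 18, 19],
}

# Dense parsing map with integer labels in [0, 19]; random stand-in for a real result.
parsing = torch.randint(0, 20, (1, 256, 176))

# One float mask per region: 1 where the pixel's label belongs to the group, 0 elsewhere.
masks = {
    key: torch.stack([parsing == idx for idx in ids]).any(dim=0).float()
    for key, ids in mask_id.items()
}
print({key: m.shape for key, m in masks.items()})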
Example #5
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['app_gen', 'correctness_gen', 'content_gen', 'style_gen', 'regularization',
                           'ad_gen', 'dis_img_gen']

        self.visual_names = ['input_P1','input_P2', 'img_gen', 'flow_fields', 'masks']
        self.model_names = ['G','D']

        self.keys = ['head','body','leg']
        self.mask_id = {'head':[1,2,4,13],'body':[3,5,6,7,10,11,14,15],'leg':[8,9,12,16,17,18,19]}
        self.GPU = torch.device('cuda:0')

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor

        # define the generator
        self.net_G = network.define_g(opt, image_nc=opt.image_nc, structure_nc=opt.structure_nc, ngf=64, img_f=512,
                                      layers=opt.layers, num_blocks=2, use_spect=opt.use_spect_g, attn_layer=opt.attn_layer, 
                                      norm='instance', activation='LeakyReLU', extractor_kz=opt.kernel_size)

        # define the discriminator 
        if self.opt.dataset_mode == 'fashion':
            self.net_D = network.define_d(opt, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
        elif self.opt.dataset_mode == 'market':
            self.net_D = network.define_d(opt, ndf=32, img_f=128, layers=3, use_spect=opt.use_spect_d)
        self.flow2color = util.flow2color()

        if self.isTrain:
            # define the loss functions
            self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(opt.device)
            self.L1loss = torch.nn.L1Loss()
            self.Correctness = external_function.PerceptualCorrectness().to(opt.device)
            self.Regularization = external_function.MultiAffineRegularizationLoss(kz_dic=opt.kernel_size).to(opt.device)
            self.Vggloss = external_function.VGGLoss().to(opt.device)

            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                                               filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                               lr=opt.lr, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)

            self.optimizer_D = torch.optim.Adam(itertools.chain(
                                filter(lambda p: p.requires_grad, self.net_D.parameters())),
                                lr=opt.lr*opt.ratio_g2d, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_D)

        # load the pre-trained model and schedulers
        self.setup(opt)
Example #6
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = [
            'app_gen', 'correctness_gen', 'content_gen', 'style_gen',
            'regularization', 'ad_gen', 'dis_img_gen'
        ]

        self.visual_names = [
            'input_P1', 'input_P2', 'img_gen', 'flow_fields', 'masks'
        ]
        self.model_names = ['G', 'D']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor

        self.net_G = network.define_g(opt,
                                      image_nc=opt.image_nc,
                                      structure_nc=opt.structure_nc,
                                      ngf=64,
                                      img_f=512,
                                      layers=opt.layers,
                                      num_blocks=2,
                                      use_spect=opt.use_spect_g,
                                      attn_layer=opt.attn_layer,
                                      norm='instance',
                                      activation='LeakyReLU',
                                      extractor_kz=opt.kernel_size)

        self.flow2color = util.flow2color()
        self.net_D = network.define_d(opt,
                                      ndf=32,
                                      img_f=128,
                                      layers=4,
                                      use_spect=opt.use_spect_d)

        if self.isTrain:
            self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(
                opt.device)
            self.L1loss = torch.nn.L1Loss()
            self.L2loss = torch.nn.MSELoss()
            self.Correctness = external_function.PerceptualCorrectness().to(
                opt.device)
            self.Regularization = external_function.MultiAffineRegularizationLoss(
                kz_dic=opt.kernel_size).to(opt.device)
            self.Vggloss = external_function.VGGLoss().to(opt.device)

            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                                lr=opt.lr,
                                                betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)

            self.optimizer_D = torch.optim.Adam(itertools.chain(
                filter(lambda p: p.requires_grad, self.net_D.parameters())),
                                                lr=opt.lr * opt.ratio_g2d,
                                                betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_D)

        self.setup(opt)
Example #7
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = [
            'app_gen',
            'content_gen',
            'style_gen',  #'reg_gen',
            'ad_gen',
            'dis_img_gen',
            'par',
            'par1'
        ]

        self.visual_names = ['input_P1', 'input_P2', 'img_gen']
        self.model_names = ['G', 'D']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor

        # define the generator
        self.net_G = network.define_g(opt,
                                      image_nc=opt.image_nc,
                                      structure_nc=opt.structure_nc,
                                      ngf=64,
                                      use_spect=opt.use_spect_g,
                                      norm='instance',
                                      activation='LeakyReLU')

        # define the discriminator
        if self.opt.dataset_mode == 'fashion':
            self.net_D = network.define_d(opt,
                                          ndf=32,
                                          img_f=128,
                                          layers=4,
                                          use_spect=opt.use_spect_d)

        trained_list = ['parnet']
        for k, v in self.net_G.named_parameters():
            flag = False
            for i in trained_list:
                if i in k:
                    flag = True
            if flag:
                #v.requires_grad = False
                print(k)

        if self.isTrain:
            # define the loss functions
            self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(
                opt.device)
            self.L1loss = torch.nn.L1Loss()
            self.Vggloss = external_function.VGGLoss().to(opt.device)
            self.parLoss = CrossEntropyLoss2d()  #torch.nn.BCELoss()

            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                                lr=opt.lr,
                                                betas=(0.9, 0.999))
            self.optimizers.append(self.optimizer_G)

            self.optimizer_D = torch.optim.Adam(itertools.chain(
                filter(lambda p: p.requires_grad, self.net_D.parameters())),
                                                lr=opt.lr * opt.ratio_g2d,
                                                betas=(0.9, 0.999))
            self.optimizers.append(self.optimizer_D)

        # load the pre-trained model and schedulers
        self.setup(opt)
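Example #7 walks net_G.named_parameters() and matches the names against trained_list; the commented-out v.requires_grad = False suggests the loop is meant to freeze (or at least inspect) the parsing sub-network selected by the 'parnet' substring. A minimal sketch of that selective-freezing pattern with a placeholder ModuleDict; the sub-module names below are illustrative:

import torch

net = torch.nn.ModuleDict({
    'parnet': torch.nn.Linear(8, 8),   # mirrors the substring matched in the example
    'decoder': torch.nn.Linear(8, 3),
})

frozen_substrings = ['parnet']
for name, param in net.named_parameters():
    if any(s in name for s in frozen_substrings):
        param.requires_grad = False  # later skipped by the requires_grad filter in the optimizer

print([name for name, p in net.named_parameters() if p.requires_grad])
# ['decoder.weight', 'decoder.bias']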
Example #8
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['app_gen','correctness_p', 'correctness_r',
                           'content_gen','style_gen',
                           'regularization_p','regularization_r',
                           'ad_gen', 'dis_img_gen',
                           'ad_gen_v', 'dis_img_gen_v']

        self.visual_names = ['ref_image', 'ref_skeleton', 'BP_step', 'P_step','img_gen']        
        self.model_names = ['G', 'D', 'D_V']

        self.FloatTensor = torch.cuda.FloatTensor if len(self.gpu_ids)>0 \
            else torch.FloatTensor
        self.ByteTensor = torch.cuda.ByteTensor if len(self.gpu_ids)>0 \
            else torch.ByteTensor
        
        self.net_G = network.define_g(opt, filename='generator', image_nc=opt.image_nc, structure_nc=opt.structure_nc, ngf=64, img_f=512,
                                      layers=opt.layers, num_blocks=2, use_spect=opt.use_spect_g, attn_layer=opt.attn_layer, 
                                      norm='instance', activation='LeakyReLU', extractor_kz=opt.kernel_size)
        if len(opt.gpu_ids) > 1:
            self.net_G = torch.nn.DataParallel(self.net_G, device_ids=self.gpu_ids)


        self.flow2color = util.flow2color()
        self.convert2skeleton = openpose_utils.tensor2skeleton(spatial_draw=True)

        self.net_D = network.define_d(opt, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
        if len(opt.gpu_ids) > 1:
            self.net_D = torch.nn.DataParallel(self.net_D, device_ids=self.gpu_ids)
        
        self.net_D_V = network.define_d(opt, name=opt.netD_V, input_length=opt.frames_D_V, ndf=32, img_f=128, layers=4, use_spect=opt.use_spect_d)
        if len(opt.gpu_ids) > 1:
            self.net_D_V = torch.nn.DataParallel(self.net_D_V, device_ids=self.gpu_ids)                

        if self.isTrain:
            self.GANloss = external_function.AdversarialLoss(opt.gan_mode).to(opt.device)
            self.L1loss = torch.nn.L1Loss()
            self.Correctness = external_function.PerceptualCorrectness().to(opt.device)

            self.Regularization = external_function.MultiAffineRegularizationLoss(kz_dic=opt.kernel_size).to(opt.device)
            self.Vggloss = external_function.VGGLoss().to(opt.device)

            # define the optimizer
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                                               filter(lambda p: p.requires_grad, self.net_G.parameters())),
                                               lr=opt.lr, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_G)

            self.optimizer_D = torch.optim.Adam(itertools.chain(
                                filter(lambda p: p.requires_grad, self.net_D.parameters()),
                                filter(lambda p: p.requires_grad, self.net_D_V.parameters())),
                                lr=opt.lr*opt.ratio_g2d, betas=(0.0, 0.999))
            self.optimizers.append(self.optimizer_D)

            if self.opt.use_mask:
                # use mask to calculate the correctness loss for foreground content
                self.opt.lambda_correct = 2.0
        else:
            self.results_dir_base = self.opt.results_dir
        self.esp = 1e-6
        self.setup(opt)
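Examples #1, #3, and #8 wrap each network in torch.nn.DataParallel whenever more than one GPU id is configured. A minimal sketch of that wrapping with a placeholder module and an illustrative gpu_ids list (the examples read it from opt.gpu_ids):

import torch

gpu_ids = [0, 1]  # illustrative; only meaningful on a multi-GPU machine

net_G = torch.nn.Linear(8, 8)  # placeholder for the real generator
if torch.cuda.is_available() and len(gpu_ids) > 1:
    net_G = net_G.cuda(gpu_ids[0])
    # Replicates the module across the listed devices and splits each input batch among them.
    net_G = torch.nn.DataParallel(net_G, device_ids=gpu_ids)

The forward pass stays the same; DataParallel gathers the per-device outputs back onto device_ids[0].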