示例#1
0
文件: dxdy_model.py 项目: JPlin/SPADE
 def initialize_networks(self, opt):
     """Create the generator and discriminator and prepare them for use."""
     self.netG = networks.define_G(opt)
     self.netD = networks.define_D(opt)
     # Gradients are only required when training.
     self.set_requires_grad([self.netG, self.netD],
                            True if self.isTrain else False)
     if self.use_gpu:
         self.netG = DataParallelWithCallback(self.netG,
                                              device_ids=opt['gpu_ids'])
         self.netD = DataParallelWithCallback(self.netD,
                                              device_ids=opt['gpu_ids'])
     self.train_nets = [self.netG, self.netD]
示例#2
0
    def __init__(self, opt):
        """Create the Pix2Pix model and, when training, its optimizers."""
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        # Wrap in DataParallel when GPUs are requested; keep a handle to the
        # underlying module either way so attribute access is uniform.
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            optimizers = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            # Unpaired training uses an extra discriminator optimizer.
            if opt.unpairTrain:
                self.optimizer_G, self.optimizer_D, self.optimizer_D2 = optimizers
            else:
                self.optimizer_G, self.optimizer_D = optimizers
            self.old_lr = opt.lr

        self.d_losses = {}
        self.nanCount = 0
    def __init__(self, opt, resume_epoch=0):
        """Build the Pix2Pix model, optional EMA shadows, and optimizers.

        Args:
            opt: parsed options (reads gpu_ids, use_ema, ema_beta, isTrain,
                lr, continue_train, which_epoch, checkpoints_dir, name).
            resume_epoch: not referenced in this body — presumably consumed
                elsewhere; TODO confirm against callers.
        """
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        # Only wrap for more than one GPU; a single GPU just receives the model.
        if len(opt.gpu_ids) > 1:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model.to(opt.gpu_ids[0])
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        # Register EMA shadow copies of every trainable parameter of the
        # 'netG' and 'netCorr' sub-networks.
        if opt.use_ema:
            self.netG_ema = EMA(opt.ema_beta)
            for name, param in self.pix2pix_model_on_one_gpu.net[
                    'netG'].named_parameters():
                if param.requires_grad:
                    self.netG_ema.register(name, param.data)
            self.netCorr_ema = EMA(opt.ema_beta)
            for name, param in self.pix2pix_model_on_one_gpu.net[
                    'netCorr'].named_parameters():
                if param.requires_grad:
                    self.netCorr_ema.register(name, param.data)

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
            # Resuming from 'latest' also restores optimizer state from disk.
            if opt.continue_train and opt.which_epoch == 'latest':
                checkpoint = torch.load(
                    os.path.join(opt.checkpoints_dir, opt.name,
                                 'optimizer.pth'))
                self.optimizer_G.load_state_dict(checkpoint['G'])
                self.optimizer_D.load_state_dict(checkpoint['D'])
        # Placeholders — presumably filled by the training loop; confirm there.
        self.last_data, self.last_netCorr, self.last_netG, self.last_optimizer_G = None, None, None, None
示例#4
0
    def initialize_networks(self, opt):
        """Instantiate all sub-networks and move the trainable ones to GPU."""
        self.netGA = networks.define_G(opt, opt['netGA'])
        self.netGB = networks.define_G(opt, opt['netGB'])
        self.netDA = networks.define_D(opt, opt['netDA'])
        self.netDB = networks.define_D(opt, opt['netDB'])
        self.netEA, self.netHairA = networks.define_RES(
            opt, opt['input_nc_A'], opt['netEDA'])
        self.netEB, self.netHairB = networks.define_RES(
            opt, opt['input_nc_B'], opt['netEDB'])

        # During pre-training every net is in the trainable set; afterwards
        # only the encoder/hair pair.
        if self.opt['pretrain']:
            names = ['netGA', 'netGB', 'netDA', 'netDB', 'netEA',
                     'netHairA', 'netEB', 'netHairB']
        else:
            names = ['netEA', 'netHairA']
        self.train_nets = [getattr(self, n) for n in names]

        # Gradients are only required when training.
        self.set_requires_grad(self.train_nets,
                               True if self.isTrain else False)

        if self.use_gpu:
            self.train_nets = [
                DataParallelWithCallback(net, device_ids=opt['gpu_ids'])
                for net in self.train_nets
            ]
            # Re-publish the wrapped nets under their attribute names.
            for name, net in zip(names, self.train_nets):
                setattr(self, name, net)
示例#5
0
    def __init__(self, opt):
        """Wrap the Pix2Pix model in DataParallel and build training optimizers."""
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        # NOTE: this variant always wraps the model — there is no CPU-only path.
        self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model,
                                                      device_ids=opt.gpu_ids)
        self.pix2pix_model_on_one_gpu = self.pix2pix_model.module

        self.generated = None
        if opt.isTrain:
            (self.optimizer_G,
             self.optimizer_D) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
    def __init__(self, opt):
        """Create the dual model and, for training, its three optimizers."""
        self.opt = opt
        self.dual_model = DualModel(opt)
        use_gpu = len(opt.gpu_ids) > 0
        if use_gpu:
            self.dual_model = DataParallelWithCallback(self.dual_model,
                                                       device_ids=opt.gpu_ids)
        # Keep a reference to the unwrapped module in both cases.
        self.dual_model_on_one_gpu = (
            self.dual_model.module if use_gpu else self.dual_model)

        self.generated = None
        if opt.isTrain:
            (self.optimizer_G,
             self.optimizer_D1,
             self.optimizer_D2) = self.dual_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
示例#7
0
    def __init__(self, opt, NADS_Net):
        """Build the GAN model around *NADS_Net* and create its optimizers."""
        self.opt = opt
        self.gan_model = GAN_Model(opt, NADS_Net)
        on_gpu = len(opt.gpu_ids) > 0
        if on_gpu:
            self.gan_model = DataParallelWithCallback(self.gan_model,
                                                      device_ids=opt.gpu_ids)
        # Unwrapped module handle (same object when not wrapped).
        self.pix2pix_model_on_one_gpu = (
            self.gan_model.module if on_gpu else self.gan_model)

        self.generated = None
        if opt.isTrain:
            (self.optimizer_G,
             self.optimizer_D) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
示例#8
0
    def __init__(self, opt, model):
        """Move *model* to the first GPU, wrap it, and derive per-GPU batch size."""
        super(MyModel, self).__init__()
        self.opt = opt
        model = model.cuda(opt.gpu_ids[0])
        self.module = model

        self.model = DataParallelWithCallback(model, device_ids=opt.gpu_ids)
        n_gpus = len(opt.gpu_ids)
        if opt.batch_for_first_gpu != -1:
            # First GPU takes a fixed share; the remainder is split evenly
            # over the remaining GPUs.
            remainder = opt.batchSize - opt.batch_for_first_gpu
            self.bs_per_gpu = remainder // (n_gpus - 1)
        else:
            # Even split across all GPUs, rounded up.
            self.bs_per_gpu = int(np.ceil(float(opt.batchSize) / n_gpus))
        # Extra samples needed so every GPU receives bs_per_gpu items.
        self.pad_bs = self.bs_per_gpu * n_gpus - opt.batchSize
示例#9
0
    def __init__(self, opt):
        """Create the segmentation-inpainting model and its four optimizers."""
        self.opt = opt
        self.seg_inpaint_model = SegInpaintModel(opt)
        wrapped = len(opt.gpu_ids) > 0
        if wrapped:
            self.seg_inpaint_model = DataParallelWithCallback(
                self.seg_inpaint_model, device_ids=opt.gpu_ids)
        self.seg_inpaint_model_on_one_gpu = (
            self.seg_inpaint_model.module if wrapped else self.seg_inpaint_model)

        self.generated = None

        # NOTE(review): optimizers are created unconditionally here — there is
        # no isTrain guard, unlike the sibling trainers.
        (self.optimizer_SPNet, self.optimizer_SGNet, self.optimizer_D_seg,
         self.optimizer_D_img) = \
            self.seg_inpaint_model_on_one_gpu.create_optimizers(opt)

        self.old_lr = opt.lr
示例#10
0
 def __init__(self, opt, model):
     """Wrap *model* for multi-GPU execution and compute per-GPU batch sizes."""
     super(MyModel, self).__init__()
     self.opt = opt
     model = model.cuda(opt.gpu_ids[0])
     self.module = model

     # Distributed runs use DDP; otherwise the callback-aware DataParallel.
     if opt.distributed:
         self.model = nn.parallel.DistributedDataParallel(
             model, find_unused_parameters=True)
     else:
         self.model = DataParallelWithCallback(model, device_ids=opt.gpu_ids)

     n_gpus = len(opt.gpu_ids)
     if opt.batch_for_first_gpu != -1:
         # First GPU gets a fixed portion; split the rest over the others.
         self.bs_per_gpu = (opt.batchSize - opt.batch_for_first_gpu) // (n_gpus - 1)
     else:
         # Even split across all GPUs, rounded up.
         self.bs_per_gpu = int(np.ceil(float(opt.batchSize) / n_gpus))
     # Extra samples required so every GPU receives bs_per_gpu items.
     self.pad_bs = self.bs_per_gpu * n_gpus - opt.batchSize
示例#11
0
    def __init__(self, opt):
        """Set up the Pix2Pix model; mask_decay must be assigned by the caller."""
        self.opt = opt
        self.mask_decay = None  # Must be filled from outside
        self.pix2pix_model = Pix2PixModel(opt)
        has_gpu = len(opt.gpu_ids) > 0
        if has_gpu:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
        self.pix2pix_model_on_one_gpu = (
            self.pix2pix_model.module if has_gpu else self.pix2pix_model)

        self.generated = None
        if opt.isTrain:
            optimizers = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.optimizer_G, self.optimizer_D = optimizers
            self.old_lr = opt.lr
示例#12
0
 def __init__(self, opt):
     """Build the model via the factory and wrap it for multi-GPU execution."""
     self.opt = opt
     self.pix2pix_model = create_model(opt)
     on_gpu = len(opt.gpu_ids) > 0
     if on_gpu:
         # Output is gathered on the last listed GPU; chunk_size presumably
         # controls input scattering — confirm with DataParallelWithCallback.
         self.pix2pix_model = DataParallelWithCallback(
             self.pix2pix_model,
             device_ids=opt.gpu_ids,
             output_device=opt.gpu_ids[-1],
             chunk_size=opt.chunk_size)
     self.pix2pix_model_on_one_gpu = (
         self.pix2pix_model.module if on_gpu else self.pix2pix_model)
     self.generated = None
     if opt.isTrain:
         (self.optimizer_G,
          self.optimizer_D) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
         self.old_lr = opt.lr
示例#13
0
    def __init__(self, opt):
        """Create the Pix2Pix model, its optimizers and, optionally, AMP scalers.

        Args:
            opt: parsed options; reads ``gpu_ids``, ``isTrain``, ``lr`` and
                ``use_amp``.
        """
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model,
                                                          device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr

        # AMP is enabled only when both available (AMP) and requested.
        # bool(...) replaces the redundant "True if ... else False" ternary.
        self.amp = bool(AMP and opt.use_amp)

        if self.amp:
            # Separate scalers so G and D steps scale losses independently.
            self.scaler_G = GradScaler()
            self.scaler_D = GradScaler()
示例#14
0
    def __init__(self, opt):
        """Create the requested model variant, dump its repr, and set up training.

        Raises:
            ValueError: if ``opt.model`` is neither ``'pix2pix'`` nor
                ``'smis'``. Previously an unknown value fell through silently
                and crashed later with a NameError on ``self.pix2pix_model``.
        """
        self.opt = opt
        if self.opt.model == 'pix2pix':
            self.pix2pix_model = Pix2pixModel(opt)
        elif self.opt.model == 'smis':
            self.pix2pix_model = SmisModel(opt)
        else:
            # Fail fast instead of leaving self.pix2pix_model undefined.
            raise ValueError('Unknown model type: %s' % self.opt.model)
        print(self.pix2pix_model)
        # Persist the architecture description for later inspection.
        with open(os.path.join(opt.checkpoints_dir, opt.name, 'model.txt'),
                  'w') as f:
            f.write(self.pix2pix_model.__str__())
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr
示例#15
0
    def __init__(self, opt):
        """Select the Pix2Pix variant requested by *opt* and set it up for use."""
        self.opt = opt
        # The model class is imported lazily so only the requested variant's
        # module is loaded.
        if self.opt.dual:
            from models.pix2pix_dualmodel import Pix2PixModel
        elif self.opt.dual_segspade:
            from models.pix2pix_dual_segspademodel import Pix2PixModel
        elif opt.box_unpair:
            from models.pix2pix_dualunpair import Pix2PixModel
        else:
            from models.pix2pix_model import Pix2PixModel

        self.pix2pix_model = Pix2PixModel(opt)
        on_gpu = len(opt.gpu_ids) > 0
        if on_gpu:
            self.pix2pix_model = DataParallelWithCallback(
                self.pix2pix_model, device_ids=opt.gpu_ids)
        self.pix2pix_model_on_one_gpu = (
            self.pix2pix_model.module if on_gpu else self.pix2pix_model)

        self.generated = None
        if opt.isTrain:
            (self.optimizer_G,
             self.optimizer_D) = self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr